How to use the new_conjecture_data method in Hypothesis

Best Python code snippet using hypothesis

engine.py

Source:engine.py Github

copy

Full Screen

...536 # running the test function for real here. If however we encounter537 # some novel behaviour, we try again with the real test function,538 # starting from the new novel prefix that has discovered.539 try:540 trial_data = self.new_conjecture_data(541 prefix=prefix, max_length=max_length542 )543 self.tree.simulate_test_function(trial_data)544 continue545 except PreviouslyUnseenBehaviour:546 pass547 # If the simulation entered part of the tree that has been killed,548 # we don't want to run this.549 if trial_data.observer.killed:550 continue551 # We might have hit the cap on number of examples we should552 # run when calculating the minimal example.553 if not should_generate_more():554 break555 prefix = trial_data.buffer556 else:557 max_length = BUFFER_SIZE558 data = self.new_conjecture_data(prefix=prefix, max_length=max_length)559 self.test_function(data)560 # A thing that is often useful but rarely happens by accident is561 # to generate the same value at multiple different points in the562 # test case.563 #564 # Rather than make this the responsibility of individual strategies565 # we implement a small mutator that just takes parts of the test566 # case with the same label and tries replacing one of them with a567 # copy of the other and tries running it. 
If we've made a good568 # guess about what to put where, this will run a similar generated569 # test case with more duplication.570 if (571 # An OVERRUN doesn't have enough information about the test572 # case to mutate, so we just skip those.573 data.status >= Status.INVALID574 # This has a tendency to trigger some weird edge cases during575 # generation so we don't let it run until we're done with the576 # health checks.577 and self.health_check_state is None578 ):579 initial_calls = self.call_count580 failed_mutations = 0581 while (582 should_generate_more()583 # We implement fairly conservative checks for how long we584 # we should run mutation for, as it's generally not obvious585 # how helpful it is for any given test case.586 and self.call_count <= initial_calls + 5587 and failed_mutations <= 5588 ):589 groups = defaultdict(list)590 for ex in data.examples:591 groups[ex.label, ex.depth].append(ex)592 groups = [v for v in groups.values() if len(v) > 1]593 if not groups:594 break595 group = self.random.choice(groups)596 ex1, ex2 = sorted(597 self.random.sample(group, 2), key=lambda i: i.index598 )599 assert ex1.end <= ex2.start600 replacements = [data.buffer[e.start : e.end] for e in [ex1, ex2]]601 replacement = self.random.choice(replacements)602 try:603 # We attempt to replace both the the examples with604 # whichever choice we made. Note that this might end605 # up messing up and getting the example boundaries606 # wrong - labels matching are only a best guess as to607 # whether the two are equivalent - but it doesn't608 # really matter. 
It may not achieve the desired result609 # but it's still a perfectly acceptable choice sequence.610 # to try.611 new_data = self.cached_test_function(612 data.buffer[: ex1.start]613 + replacement614 + data.buffer[ex1.end : ex2.start]615 + replacement616 + data.buffer[ex2.end :],617 # We set error_on_discard so that we don't end up618 # entering parts of the tree we consider redundant619 # and not worth exploring.620 error_on_discard=True,621 extend=BUFFER_SIZE,622 )623 except ContainsDiscard:624 failed_mutations += 1625 continue626 if (627 new_data.status >= data.status628 and data.buffer != new_data.buffer629 and all(630 k in new_data.target_observations631 and new_data.target_observations[k] >= v632 for k, v in data.target_observations.items()633 )634 ):635 data = new_data636 failed_mutations = 0637 else:638 failed_mutations += 1639 # Although the optimisations are logically a distinct phase, we640 # actually normally run them as part of example generation. The641 # reason for this is that we cannot guarantee that optimisation642 # actually exhausts our budget: It might finish running and we643 # discover that actually we still could run a bunch more test cases644 # if we want.645 if (646 self.valid_examples >= max(small_example_cap, optimise_at)647 and not ran_optimisations648 ):649 ran_optimisations = True650 self.optimise_targets()651 def optimise_targets(self):652 """If any target observations have been made, attempt to optimise them653 all."""654 if not self.should_optimise:655 return656 from hypothesis.internal.conjecture.optimiser import Optimiser657 # We want to avoid running the optimiser for too long in case we hit658 # an unbounded target score. 
We start this off fairly conservatively659 # in case interesting examples are easy to find and then ramp it up660 # on an exponential schedule so we don't hamper the optimiser too much661 # if it needs a long time to find good enough improvements.662 max_improvements = 10663 while True:664 prev_calls = self.call_count665 any_improvements = False666 for target, data in list(self.best_examples_of_observed_targets.items()):667 optimiser = Optimiser(668 self, data, target, max_improvements=max_improvements669 )670 optimiser.run()671 if optimiser.improvements > 0:672 any_improvements = True673 if self.interesting_examples:674 break675 max_improvements *= 2676 if any_improvements:677 continue678 self.pareto_optimise()679 if prev_calls == self.call_count:680 break681 def pareto_optimise(self):682 if self.pareto_front is not None:683 ParetoOptimiser(self).run()684 def _run(self):685 self.reuse_existing_examples()686 self.generate_new_examples()687 # We normally run the targeting phase mixed in with the generate phase,688 # but if we've been asked to run it but not generation then we have to689 # run it explciitly on its own here.690 if Phase.generate not in self.settings.phases:691 self.optimise_targets()692 self.shrink_interesting_examples()693 self.exit_with(ExitReason.finished)694 def new_conjecture_data(self, prefix, max_length=BUFFER_SIZE, observer=None):695 return ConjectureData(696 prefix=prefix,697 max_length=max_length,698 random=self.random,699 observer=observer or self.tree.new_observer(),700 )701 def new_conjecture_data_for_buffer(self, buffer):702 return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer())703 def shrink_interesting_examples(self):704 """If we've found interesting examples, try to replace each of them705 with a minimal interesting example with the same interesting_origin.706 We may find one or more examples with a new interesting_origin707 during the shrink process. 
If so we shrink these too.708 """709 if Phase.shrink not in self.settings.phases or not self.interesting_examples:710 return711 self.debug("Shrinking interesting examples")712 for prev_data in sorted(713 self.interesting_examples.values(), key=lambda d: sort_key(d.buffer)714 ):715 assert prev_data.status == Status.INTERESTING716 data = self.new_conjecture_data_for_buffer(prev_data.buffer)717 self.test_function(data)718 if data.status != Status.INTERESTING:719 self.exit_with(ExitReason.flaky)720 self.clear_secondary_key()721 while len(self.shrunk_examples) < len(self.interesting_examples):722 target, example = min(723 (724 (k, v)725 for k, v in self.interesting_examples.items()726 if k not in self.shrunk_examples727 ),728 key=lambda kv: (sort_key(kv[1].buffer), sort_key(repr(kv[0]))),729 )730 self.debug("Shrinking %r" % (target,))731 if not self.settings.report_multiple_bugs:732 # If multi-bug reporting is disabled, we shrink our currently-minimal733 # failure, allowing 'slips' to any bug with a smaller minimal example.734 self.shrink(example, lambda d: d.status == Status.INTERESTING)735 return736 def predicate(d):737 if d.status < Status.INTERESTING:738 return False739 return d.interesting_origin == target740 self.shrink(example, predicate)741 self.shrunk_examples.add(target)742 def clear_secondary_key(self):743 if self.has_existing_examples():744 # If we have any smaller examples in the secondary corpus, now is745 # a good time to try them to see if they work as shrinks. 
They746 # probably won't, but it's worth a shot and gives us a good747 # opportunity to clear out the database.748 # It's not worth trying the primary corpus because we already749 # tried all of those in the initial phase.750 corpus = sorted(751 self.settings.database.fetch(self.secondary_key), key=sort_key752 )753 for c in corpus:754 primary = {v.buffer for v in self.interesting_examples.values()}755 cap = max(map(sort_key, primary))756 if sort_key(c) > cap:757 break758 else:759 self.cached_test_function(c)760 # We unconditionally remove c from the secondary key as it761 # is either now primary or worse than our primary example762 # of this reason for interestingness.763 self.settings.database.delete(self.secondary_key, c)764 def shrink(self, example, predicate):765 s = self.new_shrinker(example, predicate)766 s.shrink()767 return s.shrink_target768 def new_shrinker(self, example, predicate):769 return Shrinker(self, example, predicate)770 def cached_test_function(self, buffer, error_on_discard=False, extend=0):771 """Checks the tree to see if we've tested this buffer, and returns the772 previous result if we have.773 Otherwise we call through to ``test_function``, and return a774 fresh result.775 If ``error_on_discard`` is set to True this will raise ``ContainsDiscard``776 in preference to running the actual test function. This is to allow us777 to skip test cases we expect to be redundant in some cases. 
Note that778 it may be the case that we don't raise ``ContainsDiscard`` even if the779 result has discards if we cannot determine from previous runs whether780 it will have a discard.781 """782 buffer = bytes(buffer)[:BUFFER_SIZE]783 max_length = min(BUFFER_SIZE, len(buffer) + extend)784 def check_result(result):785 assert result is Overrun or (786 isinstance(result, ConjectureResult) and result.status != Status.OVERRUN787 )788 return result789 try:790 return check_result(self.__data_cache[buffer])791 except KeyError:792 pass793 if error_on_discard:794 class DiscardObserver(DataObserver):795 def kill_branch(self):796 raise ContainsDiscard()797 observer = DiscardObserver()798 else:799 observer = DataObserver()800 dummy_data = self.new_conjecture_data(801 prefix=buffer, max_length=max_length, observer=observer802 )803 try:804 self.tree.simulate_test_function(dummy_data)805 except PreviouslyUnseenBehaviour:806 pass807 else:808 if dummy_data.status > Status.OVERRUN:809 dummy_data.freeze()810 try:811 return self.__data_cache[dummy_data.buffer]812 except KeyError:813 pass814 else:815 self.__data_cache[buffer] = Overrun816 return Overrun817 # We didn't find a match in the tree, so we need to run the test818 # function normally. Note that test_function will automatically819 # add this to the tree so we don't need to update the cache.820 result = None821 data = self.new_conjecture_data(822 prefix=max((buffer, dummy_data.buffer), key=len), max_length=max_length,823 )824 self.test_function(data)825 result = check_result(data.as_result())826 self.__data_cache[buffer] = result827 return result828 def event_to_string(self, event):829 if isinstance(event, str):830 return event831 try:832 return self.events_to_strings[event]833 except KeyError:834 pass835 result = str(event)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful