How to use the has_existing_examples method in hypothesis

Best Python code snippet using hypothesis

engine.py

Source: engine.py (GitHub)



...
def database(self):
    if self.database_key is None:
        return None
    return self.settings.database

def has_existing_examples(self):
    return self.database is not None and Phase.reuse in self.settings.phases

def reuse_existing_examples(self):
    """If appropriate (we have a database and have been told to use it),
    try to reload existing examples from the database.

    If there are a lot we don't try all of them. We always try the
    smallest example in the database (which is guaranteed to be the
    last failure) and the largest (which is usually the seed example
    which the last failure came from but we don't enforce that). We
    then take a random sampling of the remainder and try those. Any
    examples that are no longer interesting are cleared out.
    """
    if self.has_existing_examples():
        self.debug("Reusing examples from database")
        # We have to do some careful juggling here. We have two database
        # corpora: The primary and secondary. The primary corpus is a
        # small set of minimized examples each of which has at one point
        # demonstrated a distinct bug. We want to retry all of these.

        # We also have a secondary corpus of examples that have at some
        # point demonstrated interestingness (currently only ones that
        # were previously non-minimal examples of a bug, but this will
        # likely expand in future). These are a good source of potentially
        # interesting examples, but there are a lot of them, so we down
        # sample the secondary corpus to a more manageable size.
        corpus = sorted(
            self.settings.database.fetch(self.database_key), key=sort_key
        )
        desired_size = max(2, ceil(0.1 * self.settings.max_examples))

        for extra_key in [self.secondary_key, self.covering_key]:
            if len(corpus) < desired_size:
                extra_corpus = list(self.settings.database.fetch(extra_key))

                shortfall = desired_size - len(corpus)

                if len(extra_corpus) <= shortfall:
                    extra = extra_corpus
                else:
                    extra = self.random.sample(extra_corpus, shortfall)
                extra.sort(key=sort_key)
                corpus.extend(extra)

        self.used_examples_from_database = len(corpus) > 0

        for existing in corpus:
            last_data = ConjectureData.for_buffer(
                existing, observer=self.tree.new_observer()
            )
            try:
                self.test_function(last_data)
            finally:
                if last_data.status != Status.INTERESTING:
                    self.settings.database.delete(self.database_key, existing)
                    self.settings.database.delete(self.secondary_key, existing)

def exit_with(self, reason):
    self.exit_reason = reason
    raise RunIsComplete()

def generate_new_examples(self):
    if Phase.generate not in self.settings.phases:
        return

    zero_data = self.cached_test_function(hbytes(self.settings.buffer_size))
    if zero_data.status > Status.OVERRUN:
        self.__data_cache.pin(zero_data.buffer)

    if zero_data.status == Status.OVERRUN or (
        zero_data.status == Status.VALID
        and len(zero_data.buffer) * 2 > self.settings.buffer_size
    ):
        fail_health_check(
            self.settings,
            "The smallest natural example for your test is extremely "
            "large. This makes it difficult for Hypothesis to generate "
            "good examples, especially when trying to reduce failing ones "
            "at the end. Consider reducing the size of your data if it is "
            "of a fixed size. You could also fix this by improving how "
            "your data shrinks (see https://hypothesis.readthedocs.io/en/"
            "latest/data.html#shrinking for details), or by introducing "
            "default values inside your strategy. e.g. could you replace "
            "some arguments with their defaults by using "
            "one_of(none(), some_complex_strategy)?",
            HealthCheck.large_base_example,
        )

    if zero_data is not Overrun:
        # If the language starts with writes of length >= cap then there is
        # only one string in it: Everything after cap is forced to be zero (or
        # to be whatever value is written there). That means that once we've
        # tried the zero value, there's nothing left for us to do, so we
        # exit early here.
        has_non_forced = False

        # It's impossible to fall out of this loop normally because if we
        # did then that would mean that all blocks are writes, so we would
        # already have triggered the exhaustedness check on the tree and
        # finished running.
        for b in zero_data.blocks:  # pragma: no branch
            if b.start >= self.cap:
                break
            if not b.forced:
                has_non_forced = True
                break

        if not has_non_forced:
            self.exit_with(ExitReason.finished)

    self.health_check_state = HealthCheckState()

    count = 0
    while not self.interesting_examples and (
        count < 10 or self.health_check_state is not None
    ):
        prefix = self.generate_novel_prefix()

        def draw_bytes(data, n):
            if data.index < len(prefix):
                result = prefix[data.index : data.index + n]
                # We always draw prefixes as a whole number of blocks
                assert len(result) == n
            else:
                result = uniform(self.random, n)
            return self.__zero_bound(data, result)

        last_data = self.new_conjecture_data(draw_bytes)
        self.test_function(last_data)
        last_data.freeze()

        count += 1

    mutations = 0
    mutator = self._new_mutator()

    zero_bound_queue = []

    while not self.interesting_examples:
        if zero_bound_queue:
            # Whenever we generated an example and it hits a bound
            # which forces zero blocks into it, this creates a weird
            # distortion effect by making certain parts of the data
            # stream (especially ones to the right) much more likely
            # to be zero. We fix this by redistributing the generated
            # data by shuffling it randomly. This results in the
            # zero data being spread evenly throughout the buffer.
            # Hopefully the shrinking this causes will cause us to
            # naturally fail to hit the bound.
            # If it doesn't then we will queue the new version up again
            # (now with more zeros) and try again.
            overdrawn = zero_bound_queue.pop()
            buffer = bytearray(overdrawn.buffer)

            # These will have values written to them that are different
            # from what's in them anyway, so the value there doesn't
            # really "count" for distributional purposes, and if we
            # leave them in then they can cause the fraction of non
            # zero bytes to increase on redraw instead of decrease.
            for i in overdrawn.forced_indices:
                buffer[i] = 0

            self.random.shuffle(buffer)
            buffer = hbytes(buffer)

            def draw_bytes(data, n):
                result = buffer[data.index : data.index + n]
                if len(result) < n:
                    result += hbytes(n - len(result))
                return self.__rewrite(data, result)

            data = self.new_conjecture_data(draw_bytes=draw_bytes)
            self.test_function(data)
            data.freeze()
        else:
            origin = self.target_selector.select()
            mutations += 1
            data = self.new_conjecture_data(draw_bytes=mutator(origin))
            self.test_function(data)
            data.freeze()
            if data.status > origin.status:
                mutations = 0
            elif data.status < origin.status or mutations >= 10:
                # Cap the variations of a single example and move on to
                # an entirely fresh start. Ten is an entirely arbitrary
                # constant, but it's been working well for years.
                mutations = 0
                mutator = self._new_mutator()

        if getattr(data, "hit_zero_bound", False):
            zero_bound_queue.append(data)

        mutations += 1

def _run(self):
    self.reuse_existing_examples()
    self.generate_new_examples()
    self.shrink_interesting_examples()
    self.exit_with(ExitReason.finished)

def new_conjecture_data(self, draw_bytes):
    return ConjectureData(
        draw_bytes=draw_bytes,
        max_length=self.settings.buffer_size,
        observer=self.tree.new_observer(),
    )

def new_conjecture_data_for_buffer(self, buffer):
    return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer())

def shrink_interesting_examples(self):
    """If we've found interesting examples, try to replace each of them
    with a minimal interesting example with the same interesting_origin.

    We may find one or more examples with a new interesting_origin
    during the shrink process. If so we shrink these too.
    """
    if Phase.shrink not in self.settings.phases or not self.interesting_examples:
        return

    for prev_data in sorted(
        self.interesting_examples.values(), key=lambda d: sort_key(d.buffer)
    ):
        assert prev_data.status == Status.INTERESTING
        data = self.new_conjecture_data_for_buffer(prev_data.buffer)
        self.test_function(data)
        if data.status != Status.INTERESTING:
            self.exit_with(ExitReason.flaky)

    self.clear_secondary_key()

    while len(self.shrunk_examples) < len(self.interesting_examples):
        target, example = min(
            [
                (k, v)
                for k, v in self.interesting_examples.items()
                if k not in self.shrunk_examples
            ],
            key=lambda kv: (sort_key(kv[1].buffer), sort_key(repr(kv[0]))),
        )
        self.debug("Shrinking %r" % (target,))

        if not self.settings.report_multiple_bugs:
            # If multi-bug reporting is disabled, we shrink our currently-minimal
            # failure, allowing 'slips' to any bug with a smaller minimal example.
            self.shrink(example, lambda d: d.status == Status.INTERESTING)
            return

        def predicate(d):
            if d.status < Status.INTERESTING:
                return False
            return d.interesting_origin == target

        self.shrink(example, predicate)

        self.shrunk_examples.add(target)

def clear_secondary_key(self):
    if self.has_existing_examples():
        # If we have any smaller examples in the secondary corpus, now is
        # a good time to try them to see if they work as shrinks. They
        # probably won't, but it's worth a shot and gives us a good
        # opportunity to clear out the database.

        # It's not worth trying the primary corpus because we already
        # tried all of those in the initial phase.
        corpus = sorted(
            self.settings.database.fetch(self.secondary_key), key=sort_key
        )
        for c in corpus:
            primary = {v.buffer for v in self.interesting_examples.values()}

            cap = max(map(sort_key, primary))

            if sort_key(c) > cap:
                break
...
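has_existing_examples is an internal check on Hypothesis's ConjectureRunner, but the two things it looks at, whether an example database is configured and whether Phase.reuse is enabled, are both controlled through the public settings API. The sketch below is not part of engine.py; it is a minimal illustration with made-up test names and bounds of how a test author turns the reuse behaviour on and off.

from hypothesis import Phase, given, settings, strategies as st
from hypothesis.database import InMemoryExampleDatabase

# An explicit database; any ExampleDatabase would do. InMemoryExampleDatabase
# is not persisted to disk, so reuse only happens within a single process.
db = InMemoryExampleDatabase()

@settings(database=db, phases=[Phase.reuse, Phase.generate, Phase.shrink])
@given(st.integers())
def test_with_reuse(x):
    # With a database configured and Phase.reuse enabled, has_existing_examples()
    # is True, so previously saved failing buffers are replayed before any new
    # examples are generated. The bound below is arbitrary, chosen so the test
    # eventually fails and something gets saved to the database.
    assert x < 10 ** 6

@settings(database=None)
@given(st.integers())
def test_without_reuse(x):
    # With database=None, has_existing_examples() is False and the reuse
    # phase does nothing.
    assert isinstance(x, int)

With the first profile, a failure found on one run of test_with_reuse is saved to db and replayed by reuse_existing_examples() on a later run in the same process; with the second, nothing is ever fetched or stored.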

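The corpus handling in reuse_existing_examples relies only on the small ExampleDatabase interface: save() stores a buffer under a key, fetch() yields all stored buffers for a key, and delete() removes entries that are no longer interesting. Below is a rough, self-contained sketch of that fetch-replay-delete loop; the keys, buffers, and the replay() helper (standing in for self.test_function) are made up for the example, and the sort key mimics the shortlex ordering that sort_key provides.

from hypothesis.database import InMemoryExampleDatabase

def replay(buffer):
    # Hypothetical stand-in for self.test_function(...): pretend that only
    # very short buffers still reproduce a failure.
    return len(buffer) <= 2

db = InMemoryExampleDatabase()
primary_key = b"demo-primary-key"            # stands in for self.database_key
secondary_key = primary_key + b".secondary"  # stands in for self.secondary_key

db.save(primary_key, b"\x00\x01")            # a minimized failing example
db.save(primary_key, b"\x00\x01\x02\x03")    # a larger, previously failing one

# Mirror of the reuse loop: replay every stored buffer in shortlex order and
# clear out any entry that is no longer interesting.
for buf in sorted(db.fetch(primary_key), key=lambda b: (len(b), b)):
    if not replay(buf):
        db.delete(primary_key, buf)
        db.delete(secondary_key, buf)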

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

