How to use the reuse_existing_examples method in Hypothesis

Below are two representative Python snippets from the Hypothesis codebase: the engine.py source where reuse_existing_examples is defined, and tests from test_pareto.py that exercise it directly.
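
reuse_existing_examples is a method on Hypothesis's internal ConjectureRunner (hypothesis.internal.conjecture.engine). It implements the Phase.reuse step of a run: before any new examples are generated, previously saved examples are reloaded from the example database and replayed. You normally never call it yourself; you control it through the public settings API. The minimal sketch below shows that configuration; the database, property, and strategy are illustrative placeholders, not taken from the snippets that follow.

# Sketch: enabling the reuse phase through Hypothesis's public API.
# The property being tested is an illustrative placeholder.
from hypothesis import Phase, given, settings, strategies as st
from hypothesis.database import InMemoryExampleDatabase

db = InMemoryExampleDatabase()  # previously found failures are stored here

@settings(
    database=db,
    # Keeping Phase.reuse in the phase list means saved failing examples
    # are replayed before any new examples are generated.
    phases=[Phase.reuse, Phase.generate, Phase.shrink],
)
@given(st.integers())
def test_integers_are_small(n):
    assert n < 10 ** 6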

engine.py

Source: engine.py (GitHub)

...
            return None
        return self.settings.database

    def has_existing_examples(self):
        return self.database is not None and Phase.reuse in self.settings.phases

    def reuse_existing_examples(self):
        """If appropriate (we have a database and have been told to use it),
        try to reload existing examples from the database.
        If there are a lot we don't try all of them. We always try the
        smallest example in the database (which is guaranteed to be the
        last failure) and the largest (which is usually the seed example
        which the last failure came from but we don't enforce that). We
        then take a random sampling of the remainder and try those. Any
        examples that are no longer interesting are cleared out.
        """
        if self.has_existing_examples():
            self.debug("Reusing examples from database")
            # We have to do some careful juggling here. We have two database
            # corpora: The primary and secondary. The primary corpus is a
            # small set of minimized examples each of which has at one point
            # demonstrated a distinct bug. We want to retry all of these.
            # We also have a secondary corpus of examples that have at some
            # point demonstrated interestingness (currently only ones that
            # were previously non-minimal examples of a bug, but this will
            # likely expand in future). These are a good source of potentially
            # interesting examples, but there are a lot of them, so we down
            # sample the secondary corpus to a more manageable size.
            corpus = sorted(
                self.settings.database.fetch(self.database_key), key=sort_key
            )
            desired_size = max(2, ceil(0.1 * self.settings.max_examples))
            for extra_key in [self.secondary_key, self.covering_key]:
                if len(corpus) < desired_size:
                    extra_corpus = list(self.settings.database.fetch(extra_key))
                    shortfall = desired_size - len(corpus)
                    if len(extra_corpus) <= shortfall:
                        extra = extra_corpus
                    else:
                        extra = self.random.sample(extra_corpus, shortfall)
                    extra.sort(key=sort_key)
                    corpus.extend(extra)
            self.used_examples_from_database = len(corpus) > 0
            for existing in corpus:
                last_data = ConjectureData.for_buffer(
                    existing, observer=self.tree.new_observer()
                )
                try:
                    self.test_function(last_data)
                finally:
                    if last_data.status != Status.INTERESTING:
                        self.settings.database.delete(self.database_key, existing)
                        self.settings.database.delete(self.secondary_key, existing)

    def exit_with(self, reason):
        self.exit_reason = reason
        raise RunIsComplete()

    def generate_new_examples(self):
        if Phase.generate not in self.settings.phases:
            return
        zero_data = self.cached_test_function(hbytes(self.settings.buffer_size))
        if zero_data.status > Status.OVERRUN:
            self.__data_cache.pin(zero_data.buffer)
        if zero_data.status == Status.OVERRUN or (
            zero_data.status == Status.VALID
            and len(zero_data.buffer) * 2 > self.settings.buffer_size
        ):
            fail_health_check(
                self.settings,
                "The smallest natural example for your test is extremely "
                "large. This makes it difficult for Hypothesis to generate "
                "good examples, especially when trying to reduce failing ones "
                "at the end. Consider reducing the size of your data if it is "
                "of a fixed size. You could also fix this by improving how "
                "your data shrinks (see https://hypothesis.readthedocs.io/en/"
                "latest/data.html#shrinking for details), or by introducing "
                "default values inside your strategy. e.g. could you replace "
                "some arguments with their defaults by using "
                "one_of(none(), some_complex_strategy)?",
                HealthCheck.large_base_example,
            )
        if zero_data is not Overrun:
            # If the language starts with writes of length >= cap then there is
            # only one string in it: Everything after cap is forced to be zero (or
            # to be whatever value is written there). That means that once we've
            # tried the zero value, there's nothing left for us to do, so we
            # exit early here.
            has_non_forced = False
            # It's impossible to fall out of this loop normally because if we
            # did then that would mean that all blocks are writes, so we would
            # already have triggered the exhaustedness check on the tree and
            # finished running.
            for b in zero_data.blocks:  # pragma: no branch
                if b.start >= self.cap:
                    break
                if not b.forced:
                    has_non_forced = True
                    break
            if not has_non_forced:
                self.exit_with(ExitReason.finished)
        self.health_check_state = HealthCheckState()
        count = 0
        while not self.interesting_examples and (
            count < 10 or self.health_check_state is not None
        ):
            prefix = self.generate_novel_prefix()

            def draw_bytes(data, n):
                if data.index < len(prefix):
                    result = prefix[data.index : data.index + n]
                    # We always draw prefixes as a whole number of blocks
                    assert len(result) == n
                else:
                    result = uniform(self.random, n)
                return self.__zero_bound(data, result)

            last_data = self.new_conjecture_data(draw_bytes)
            self.test_function(last_data)
            last_data.freeze()
            count += 1
        mutations = 0
        mutator = self._new_mutator()
        zero_bound_queue = []
        while not self.interesting_examples:
            if zero_bound_queue:
                # Whenever we generated an example and it hits a bound
                # which forces zero blocks into it, this creates a weird
                # distortion effect by making certain parts of the data
                # stream (especially ones to the right) much more likely
                # to be zero. We fix this by redistributing the generated
                # data by shuffling it randomly. This results in the
                # zero data being spread evenly throughout the buffer.
                # Hopefully the shrinking this causes will cause us to
                # naturally fail to hit the bound.
                # If it doesn't then we will queue the new version up again
                # (now with more zeros) and try again.
                overdrawn = zero_bound_queue.pop()
                buffer = bytearray(overdrawn.buffer)
                # These will have values written to them that are different
                # from what's in them anyway, so the value there doesn't
                # really "count" for distributional purposes, and if we
                # leave them in then they can cause the fraction of non
                # zero bytes to increase on redraw instead of decrease.
                for i in overdrawn.forced_indices:
                    buffer[i] = 0
                self.random.shuffle(buffer)
                buffer = hbytes(buffer)

                def draw_bytes(data, n):
                    result = buffer[data.index : data.index + n]
                    if len(result) < n:
                        result += hbytes(n - len(result))
                    return self.__rewrite(data, result)

                data = self.new_conjecture_data(draw_bytes=draw_bytes)
                self.test_function(data)
                data.freeze()
            else:
                origin = self.target_selector.select()
                mutations += 1
                data = self.new_conjecture_data(draw_bytes=mutator(origin))
                self.test_function(data)
                data.freeze()
                if data.status > origin.status:
                    mutations = 0
                elif data.status < origin.status or mutations >= 10:
                    # Cap the variations of a single example and move on to
                    # an entirely fresh start. Ten is an entirely arbitrary
                    # constant, but it's been working well for years.
                    mutations = 0
                    mutator = self._new_mutator()
            if getattr(data, "hit_zero_bound", False):
                zero_bound_queue.append(data)
            mutations += 1

    def _run(self):
        self.reuse_existing_examples()
        self.generate_new_examples()
        self.shrink_interesting_examples()
        self.exit_with(ExitReason.finished)

    def new_conjecture_data(self, draw_bytes):
        return ConjectureData(
            draw_bytes=draw_bytes,
            max_length=self.settings.buffer_size,
            observer=self.tree.new_observer(),
        )

    def new_conjecture_data_for_buffer(self, buffer):
        return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer())

    def shrink_interesting_examples(self):
        """If we've found interesting examples, try to replace each of them
        with a minimal interesting example with the same interesting_origin....
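
As the docstring above explains, the primary corpus (everything stored under database_key) is always replayed, while the secondary and covering corpora are only sampled until the corpus reaches desired_size = max(2, ceil(0.1 * max_examples)). The standalone sketch below only illustrates that down-sampling arithmetic; the function name and inputs are made up, and it uses plain Python rather than the runner's internals.

# Standalone illustration of the corpus down-sampling policy above.
# `primary`, `secondary`, and `max_examples` are hypothetical inputs.
import random
from math import ceil

def sample_corpus(primary, secondary, max_examples, rng=random):
    corpus = sorted(primary)
    desired_size = max(2, ceil(0.1 * max_examples))
    if len(corpus) < desired_size:
        shortfall = desired_size - len(corpus)
        if len(secondary) <= shortfall:
            extra = list(secondary)
        else:
            extra = rng.sample(list(secondary), shortfall)
        corpus.extend(sorted(extra))
    return corpus

# Two minimized failures plus fifty secondary examples at max_examples=100:
# desired_size is 10, so only 8 of the 50 secondary examples are retried.
primary = [b"\x00", b"\x01"]
secondary = [bytes([i]) for i in range(2, 52)]
print(len(sample_corpus(primary, secondary, 100)))  # -> 10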

test_pareto.py

Source: test_pareto.py (GitHub)

...
        )
        for i in range(10000):
            db.save(runner.pareto_key, int_to_bytes(i, 2))
        with pytest.raises(RunIsComplete):
            runner.reuse_existing_examples()
        assert runner.valid_examples == 1000


def test_stops_loading_pareto_front_if_interesting():
    with deterministic_PRNG():

        def test(data):
            data.draw_bits(8)
            data.draw_bits(8)
            data.mark_interesting()

        db = InMemoryExampleDatabase()
        runner = ConjectureRunner(
            test,
            settings=settings(
                max_examples=1000,
                database=db,
                suppress_health_check=HealthCheck.all(),
                phases=[Phase.reuse],
            ),
            database_key=b"stuff",
        )
        for i in range(10000):
            db.save(runner.pareto_key, int_to_bytes(i, 2))
        runner.reuse_existing_examples()
        assert runner.call_count == 1


def test_uses_tags_in_calculating_pareto_front():
    with deterministic_PRNG():

        def test(data):
            if data.draw_bits(1):
                data.start_example(11)
                data.draw_bits(8)
                data.stop_example()

        runner = ConjectureRunner(
            test,
            settings=settings(max_examples=10, database=InMemoryExampleDatabase()),
            database_key=b"stuff",
        )
        runner.run()
...
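
These tests drive ConjectureRunner directly, which is only practical inside Hypothesis's own test suite. From the public API the same behaviour appears as a failure that is saved on one run and replayed at the start of the next. The end-to-end sketch below assumes a made-up failing property and a shared in-memory database; it is not taken from the tests above.

# Sketch: a failure saved during the first run is replayed during the
# reuse phase of the second run. The property is an illustrative example.
from hypothesis import given, settings, strategies as st
from hypothesis.database import InMemoryExampleDatabase

shared_db = InMemoryExampleDatabase()

@settings(database=shared_db, max_examples=50)
@given(st.integers())
def test_integers_are_modest(n):
    assert n < 10 ** 5

for run in range(2):
    try:
        # The first call generates examples, finds a counterexample, and
        # saves the shrunk buffer to shared_db. The second call replays
        # that saved example before generating anything new.
        test_integers_are_modest()
    except AssertionError:
        pass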
