How to use the generate_novel_prefix method in Hypothesis

Best Python code snippet using hypothesis

engine.py

Source:engine.py Github

copy

Full Screen

# (Excerpt of ConjectureRunner from hypothesis-python's engine.py.
# The enclosing class header and the method containing the first four
# statements are outside this excerpt; the excerpt also ends mid-method.)

            # Tail of an enclosing method (presumably the main test-function
            # driver — TODO confirm against the full file): stop the run when
            # iteration or tree-exhaustion limits are hit, then feed the
            # result into the health-check bookkeeping below.
            self.exit_with(ExitReason.max_iterations)
        if self.__tree_is_exhausted():
            self.exit_with(ExitReason.finished)
        self.record_for_health_check(data)

    def generate_novel_prefix(self):
        """Uses the tree to proactively generate a starting sequence of bytes
        that we haven't explored yet for this test.

        When this method is called, we assume that there must be at
        least one novel prefix left to find. If there were not, then the
        test run should have already stopped due to tree exhaustion.
        """
        return self.tree.generate_novel_prefix(self.random)

    @property
    def cap(self):
        # Soft limit on generated buffer length: half the configured
        # buffer size. Used by __zero_bound to force trailing zeros.
        return self.settings.buffer_size // 2

    def record_for_health_check(self, data):
        """Update health-check counters for one test run and raise the
        appropriate health-check failure if a threshold is crossed."""
        # Once we've actually found a bug, there's no point in trying to run
        # health checks - they'll just mask the actually important information.
        if data.status == Status.INTERESTING:
            self.health_check_state = None
        state = self.health_check_state
        if state is None:
            # Health checks are disabled (already passed, or a bug was found).
            return
        state.draw_times.extend(data.draw_times)
        if data.status == Status.VALID:
            state.valid_examples += 1
        elif data.status == Status.INVALID:
            state.invalid_examples += 1
        else:
            assert data.status == Status.OVERRUN
            state.overrun_examples += 1
        # Thresholds: enough valid draws ends the health-check phase;
        # too many invalid/overrun draws fails the corresponding check.
        max_valid_draws = 10
        max_invalid_draws = 50
        max_overrun_draws = 20
        assert state.valid_examples <= max_valid_draws
        if state.valid_examples == max_valid_draws:
            # Generation looks healthy; stop collecting statistics.
            self.health_check_state = None
            return
        if state.overrun_examples == max_overrun_draws:
            fail_health_check(
                self.settings,
                (
                    "Examples routinely exceeded the max allowable size. "
                    "(%d examples overran while generating %d valid ones)"
                    ". Generating examples this large will usually lead to"
                    " bad results. You could try setting max_size parameters "
                    "on your collections and turning "
                    "max_leaves down on recursive() calls."
                )
                % (state.overrun_examples, state.valid_examples),
                HealthCheck.data_too_large,
            )
        if state.invalid_examples == max_invalid_draws:
            fail_health_check(
                self.settings,
                (
                    "It looks like your strategy is filtering out a lot "
                    "of data. Health check found %d filtered examples but "
                    "only %d good ones. This will make your tests much "
                    "slower, and also will probably distort the data "
                    "generation quite a lot. You should adapt your "
                    "strategy to filter less. This can also be caused by "
                    "a low max_leaves parameter in recursive() calls"
                )
                % (state.invalid_examples, state.valid_examples),
                HealthCheck.filter_too_much,
            )
        draw_time = sum(state.draw_times)
        if draw_time > 1.0:
            fail_health_check(
                self.settings,
                (
                    "Data generation is extremely slow: Only produced "
                    "%d valid examples in %.2f seconds (%d invalid ones "
                    "and %d exceeded maximum size). Try decreasing "
                    "size of the data you're generating (with e.g."
                    "max_size or max_leaves parameters)."
                )
                % (
                    state.valid_examples,
                    draw_time,
                    state.invalid_examples,
                    state.overrun_examples,
                ),
                HealthCheck.too_slow,
            )

    def save_buffer(self, buffer):
        """Persist an interesting buffer to the primary database corpus."""
        if self.settings.database is not None:
            key = self.database_key
            if key is None:
                # No key means there is nowhere meaningful to save to.
                return
            self.settings.database.save(key, hbytes(buffer))

    def downgrade_buffer(self, buffer):
        """Move a buffer from the primary corpus to the secondary one."""
        if self.settings.database is not None and self.database_key is not None:
            self.settings.database.move(self.database_key, self.secondary_key, buffer)

    @property
    def secondary_key(self):
        # Database key for previously-interesting but non-minimal examples.
        return b".".join((self.database_key, b"secondary"))

    @property
    def covering_key(self):
        # Database key for coverage-related examples.
        return b".".join((self.database_key, b"coverage"))

    def note_details(self, data):
        """Record timing/event statistics for a completed test run."""
        self.__data_cache[data.buffer] = data.as_result()
        # Clamp at zero to guard against clock adjustments producing a
        # negative elapsed time.
        runtime = max(data.finish_time - data.start_time, 0.0)
        self.all_runtimes.append(runtime)
        self.all_drawtimes.extend(data.draw_times)
        self.status_runtimes.setdefault(data.status, []).append(runtime)
        # De-duplicate events per run before counting them.
        for event in set(map(self.event_to_string, data.events)):
            self.event_call_counts[event] += 1

    def debug(self, message):
        """Emit a debug report if verbosity is at least ``debug``."""
        if self.settings.verbosity >= Verbosity.debug:
            base_report(message)

    @property
    def report_debug_info(self):
        # True when debug-level reporting is enabled.
        return self.settings.verbosity >= Verbosity.debug

    def debug_data(self, data):
        """Debug-print a structured rendering of ``data``'s example tree."""
        if not self.report_debug_info:
            return

        # ``stack`` mirrors the nesting of examples; leaves become ints,
        # nested examples become sub-lists (single-child lists are flattened).
        stack = [[]]

        def go(ex):
            if ex.length == 0:
                return
            if len(ex.children) == 0:
                stack[-1].append(int_from_bytes(data.buffer[ex.start : ex.end]))
            else:
                node = []
                stack.append(node)
                for v in ex.children:
                    go(v)
                stack.pop()
                if len(node) == 1:
                    # Collapse trivial single-element groupings.
                    stack[-1].extend(node)
                else:
                    stack[-1].append(node)

        go(data.examples[0])
        assert len(stack) == 1

        status = repr(data.status)
        if data.status == Status.INTERESTING:
            status = "%s (%r)" % (status, data.interesting_origin)

        self.debug(
            "%d bytes %r -> %s, %s" % (data.index, stack[0], status, data.output)
        )

    def run(self):
        """Run the engine to completion, then report a summary."""
        with local_settings(self.settings):
            try:
                self._run()
            except RunIsComplete:
                # Raised by exit_with() to unwind out of the main loop.
                pass
            for v in self.interesting_examples.values():
                self.debug_data(v)
            self.debug(
                u"Run complete after %d examples (%d valid) and %d shrinks"
                % (self.call_count, self.valid_examples, self.shrinks)
            )

    def _new_mutator(self):
        """Build a mutation-based draw function.

        Returns ``mutate_from(origin)``, which fixes a target buffer and a
        fresh novel prefix and returns a ``draw_bytes``-style callable that
        mixes bytes from the target with randomly chosen mutations.
        """
        # One-element list used as a mutable cell closed over by the
        # nested draw functions.
        target_data = [None]

        def draw_new(data, n):
            # Ignore the target entirely: fresh uniform bytes.
            return uniform(self.random, n)

        def draw_existing(data, n):
            # Copy the corresponding bytes from the target buffer.
            return target_data[0].buffer[data.index : data.index + n]

        def draw_smaller(data, n):
            # Draw bytes that compare <= the target's bytes at this position.
            existing = target_data[0].buffer[data.index : data.index + n]
            r = uniform(self.random, n)
            if r <= existing:
                return r
            return _draw_predecessor(self.random, existing)

        def draw_larger(data, n):
            # Draw bytes that compare >= the target's bytes at this position.
            existing = target_data[0].buffer[data.index : data.index + n]
            r = uniform(self.random, n)
            if r >= existing:
                return r
            return _draw_successor(self.random, existing)

        def reuse_existing(data, n):
            # Reuse a previously drawn block of the same size, if any.
            choices = data.block_starts.get(n, [])
            if choices:
                i = self.random.choice(choices)
                assert i + n <= len(data.buffer)
                return hbytes(data.buffer[i : i + n])
            else:
                result = uniform(self.random, n)
                assert isinstance(result, hbytes)
                return result

        def flip_bit(data, n):
            # Copy the target's bytes and flip one random bit.
            buf = bytearray(target_data[0].buffer[data.index : data.index + n])
            i = self.random.randint(0, n - 1)
            k = self.random.randint(0, 7)
            buf[i] ^= 1 << k
            return hbytes(buf)

        def draw_zero(data, n):
            return hbytes(b"\0" * n)

        def draw_max(data, n):
            return hbytes([255]) * n

        def draw_constant(data, n):
            # A single random byte value repeated n times.
            return hbytes([self.random.randint(0, 255)]) * n

        def redraw_last(data, n):
            # Keep the target's bytes up to its last block, then go random.
            u = target_data[0].blocks[-1].start
            if data.index + n <= u:
                return target_data[0].buffer[data.index : data.index + n]
            else:
                return uniform(self.random, n)

        # Weighted pool of mutation strategies (duplicates increase weight).
        options = [
            draw_new,
            redraw_last,
            redraw_last,
            reuse_existing,
            reuse_existing,
            draw_existing,
            draw_smaller,
            draw_larger,
            flip_bit,
            draw_zero,
            draw_max,
            draw_zero,
            draw_max,
            draw_constant,
        ]

        # Pre-select three strategies for this mutator instance.
        bits = [self.random.choice(options) for _ in hrange(3)]

        prefix = [None]

        def mutate_from(origin):
            target_data[0] = origin
            prefix[0] = self.generate_novel_prefix()
            return draw_mutated

        def draw_mutated(data, n):
            if data.index + n > len(target_data[0].buffer):
                # Past the end of the target buffer: fall back to random.
                result = uniform(self.random, n)
            else:
                draw = self.random.choice(bits)
                result = draw(data, n)
            # Force the novel prefix onto the start of the draw so we still
            # explore unvisited parts of the tree.
            p = prefix[0]
            if data.index < len(p):
                start = p[data.index : data.index + n]
                result = start + result[len(start) :]
                assert len(result) == n
            return self.__zero_bound(data, result)

        return mutate_from

    def __rewrite(self, data, result):
        return self.__zero_bound(data, result)

    def __zero_bound(self, data, result):
        """This tries to get the size of the generated data under control by
        replacing the result with zero if we are too deep or have already
        generated too much data.

        This causes us to enter "shrinking mode" there and thus reduce
        the size of the generated data.
        """
        initial = len(result)
        if data.depth * 2 >= MAX_DEPTH or data.index >= self.cap:
            # Entirely past the cap (or too deep): force all-zero bytes.
            data.forced_indices.update(hrange(data.index, data.index + initial))
            data.hit_zero_bound = True
            result = hbytes(initial)
        elif data.index + initial >= self.cap:
            # Straddling the cap: keep the part below it, zero the rest.
            data.hit_zero_bound = True
            n = self.cap - data.index
            data.forced_indices.update(hrange(self.cap, data.index + initial))
            result = result[:n] + hbytes(initial - n)
        assert len(result) == initial
        return result

    @property
    def database(self):
        # The configured database, or None when no key was provided.
        if self.database_key is None:
            return None
        return self.settings.database

    def has_existing_examples(self):
        """True when there is a database and the reuse phase is enabled."""
        return self.database is not None and Phase.reuse in self.settings.phases

    def reuse_existing_examples(self):
        """If appropriate (we have a database and have been told to use it),
        try to reload existing examples from the database.

        If there are a lot we don't try all of them. We always try the
        smallest example in the database (which is guaranteed to be the
        last failure) and the largest (which is usually the seed example
        which the last failure came from but we don't enforce that). We
        then take a random sampling of the remainder and try those. Any
        examples that are no longer interesting are cleared out.
        """
        if self.has_existing_examples():
            self.debug("Reusing examples from database")
            # We have to do some careful juggling here. We have two database
            # corpora: The primary and secondary. The primary corpus is a
            # small set of minimized examples each of which has at one point
            # demonstrated a distinct bug. We want to retry all of these.
            # We also have a secondary corpus of examples that have at some
            # point demonstrated interestingness (currently only ones that
            # were previously non-minimal examples of a bug, but this will
            # likely expand in future). These are a good source of potentially
            # interesting examples, but there are a lot of them, so we down
            # sample the secondary corpus to a more manageable size.
            corpus = sorted(
                self.settings.database.fetch(self.database_key), key=sort_key
            )
            desired_size = max(2, ceil(0.1 * self.settings.max_examples))
            for extra_key in [self.secondary_key, self.covering_key]:
                if len(corpus) < desired_size:
                    extra_corpus = list(self.settings.database.fetch(extra_key))
                    shortfall = desired_size - len(corpus)
                    if len(extra_corpus) <= shortfall:
                        extra = extra_corpus
                    else:
                        extra = self.random.sample(extra_corpus, shortfall)
                    extra.sort(key=sort_key)
                    corpus.extend(extra)
            self.used_examples_from_database = len(corpus) > 0
            for existing in corpus:
                last_data = ConjectureData.for_buffer(
                    existing, observer=self.tree.new_observer()
                )
                try:
                    self.test_function(last_data)
                finally:
                    # Stale examples are removed from both corpora so they
                    # are not retried on future runs.
                    if last_data.status != Status.INTERESTING:
                        self.settings.database.delete(self.database_key, existing)
                        self.settings.database.delete(self.secondary_key, existing)

    def exit_with(self, reason):
        """Record the exit reason and unwind via RunIsComplete."""
        self.exit_reason = reason
        raise RunIsComplete()

    def generate_new_examples(self):
        """Main generation phase: try the all-zero example, run health
        checks, then generate novel examples (and, below, mutations)."""
        if Phase.generate not in self.settings.phases:
            return

        zero_data = self.cached_test_function(hbytes(self.settings.buffer_size))
        if zero_data.status > Status.OVERRUN:
            # Keep the zero example cached for the lifetime of the run.
            self.__data_cache.pin(zero_data.buffer)

        if zero_data.status == Status.OVERRUN or (
            zero_data.status == Status.VALID
            and len(zero_data.buffer) * 2 > self.settings.buffer_size
        ):
            fail_health_check(
                self.settings,
                "The smallest natural example for your test is extremely "
                "large. This makes it difficult for Hypothesis to generate "
                "good examples, especially when trying to reduce failing ones "
                "at the end. Consider reducing the size of your data if it is "
                "of a fixed size. You could also fix this by improving how "
                "your data shrinks (see https://hypothesis.readthedocs.io/en/"
                "latest/data.html#shrinking for details), or by introducing "
                "default values inside your strategy. e.g. could you replace "
                "some arguments with their defaults by using "
                "one_of(none(), some_complex_strategy)?",
                HealthCheck.large_base_example,
            )

        if zero_data is not Overrun:
            # If the language starts with writes of length >= cap then there is
            # only one string in it: Everything after cap is forced to be zero (or
            # to be whatever value is written there). That means that once we've
            # tried the zero value, there's nothing left for us to do, so we
            # exit early here.
            has_non_forced = False

            # It's impossible to fall out of this loop normally because if we
            # did then that would mean that all blocks are writes, so we would
            # already have triggered the exhaustedness check on the tree and
            # finished running.
            for b in zero_data.blocks:  # pragma: no branch
                if b.start >= self.cap:
                    break
                if not b.forced:
                    has_non_forced = True
                    break
            if not has_non_forced:
                self.exit_with(ExitReason.finished)

        self.health_check_state = HealthCheckState()

        count = 0
        while not self.interesting_examples and (
            count < 10 or self.health_check_state is not None
        ):
            prefix = self.generate_novel_prefix()

            def draw_bytes(data, n):
                if data.index < len(prefix):
                    result = prefix[data.index : data.index + n]
                    # We always draw prefixes as a whole number of blocks
                    assert len(result) == n
                else:
                    result = uniform(self.random, n)
                return self.__zero_bound(data, result)

            last_data = self.new_conjecture_data(draw_bytes)
            self.test_function(last_data)
            last_data.freeze()
            count += 1

        mutations = 0
        mutator = self._new_mutator()
        # (Excerpt truncated here; the remainder of generate_new_examples is
        # outside this excerpt.)

Full Screen

Full Screen

test_data_tree.py

Source:test_data_tree.py Github

copy

Full Screen

# (Excerpt of test_data_tree.py from hypothesis-python. The excerpt begins
# mid-test — the enclosing test's def and the inner tf definition are outside
# this view — and ends mid-test as well.)

        data.write(b"\0")
        data.draw_bits(2)

    runner = ConjectureRunner(tf, settings=TEST_SETTINGS, random=Random(0))

    # Every novel prefix the tree produces must be genuinely novel: padding
    # it out to a full buffer should not be rewritable to a known result,
    # and running it must agree with the tree's subsequent rewrite.
    for _ in range(100):
        prefix = runner.tree.generate_novel_prefix(runner.random)
        example = prefix + bytes(8 - len(prefix))
        assert runner.tree.rewrite(example)[1] is None
        result = runner.cached_test_function(example)
        assert runner.tree.rewrite(example)[0] == result.buffer


def test_overruns_if_not_enough_bytes_for_block():
    # A one-byte buffer cannot satisfy a two-byte draw: rewrite reports OVERRUN.
    runner = ConjectureRunner(
        lambda data: data.draw_bytes(2), settings=TEST_SETTINGS, random=Random(0)
    )
    runner.cached_test_function(b"\0\0")
    assert runner.tree.rewrite(b"\0")[1] == Status.OVERRUN


def test_overruns_if_prefix():
    # A strict prefix of a recorded example also rewrites to OVERRUN.
    runner = ConjectureRunner(
        lambda data: [data.draw_bits(1) for _ in range(2)],
        settings=TEST_SETTINGS,
        random=Random(0),
    )
    runner.cached_test_function(b"\0\0")
    assert runner.tree.rewrite(b"\0")[1] == Status.OVERRUN


def test_stores_the_tree_flat_until_needed():
    # A single linear path is stored in one node rather than a chain.
    @runner_for(bytes(10))
    def runner(data):
        for _ in range(10):
            data.draw_bits(1)
        data.mark_interesting()

    root = runner.tree.root
    assert len(root.bit_lengths) == 10
    assert len(root.values) == 10
    assert root.transition.status == Status.INTERESTING


def test_split_in_the_middle():
    # Two examples diverging mid-way split the flat node at the branch point.
    @runner_for([0, 0, 2], [0, 1, 3])
    def runner(data):
        data.draw_bits(1)
        data.draw_bits(1)
        data.draw_bits(4)
        data.mark_interesting()

    root = runner.tree.root
    assert len(root.bit_lengths) == len(root.values) == 1
    assert list(root.transition.children[0].values) == [2]
    assert list(root.transition.children[1].values) == [3]


def test_stores_forced_nodes():
    # Indices of forced draws are recorded on the node.
    @runner_for(bytes(3))
    def runner(data):
        data.draw_bits(1, forced=0)
        data.draw_bits(1)
        data.draw_bits(1, forced=0)
        data.mark_interesting()

    root = runner.tree.root
    assert root.forced == {0, 2}


def test_correctly_relocates_forced_nodes():
    # After a split, forced indices are re-based in each child node.
    @runner_for([0, 0], [1, 0])
    def runner(data):
        data.draw_bits(1)
        data.draw_bits(1, forced=0)
        data.mark_interesting()

    root = runner.tree.root
    assert root.transition.children[1].forced == {0}
    assert root.transition.children[0].forced == {0}


def test_can_go_from_interesting_to_valid():
    # Re-running the same path with a different passing status is allowed.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.VALID)


def test_going_from_interesting_to_invalid_is_flaky():
    # ...but changing to INVALID on the same path is reported as flaky.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.conclude_test(Status.INVALID)


def test_concluding_at_prefix_is_flaky():
    # Concluding before reaching a previously recorded draw is flaky.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.conclude_test(Status.INVALID)


def test_concluding_with_overrun_at_prefix_is_not_flaky():
    # An OVERRUN at a prefix is expected behaviour, not flakiness.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.OVERRUN)


def test_changing_n_bits_is_flaky_in_prefix():
    # Drawing a different bit-width at a recorded position is flaky (flat node).
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.draw_bits(2)


def test_changing_n_bits_is_flaky_in_branch():
    # Same flakiness check, but at a position where the tree has branched.
    tree = DataTree()
    for i in [0, 1]:
        data = ConjectureData.for_buffer([i], observer=tree.new_observer())
        data.draw_bits(1)
        with pytest.raises(StopTest):
            data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.draw_bits(2)


def test_extending_past_conclusion_is_flaky():
    # Drawing more data after a recorded conclusion is flaky.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(Flaky):
        data.draw_bits(1)


def test_changing_to_forced_is_flaky():
    # A draw that was free before cannot become forced on replay.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.draw_bits(1, forced=0)


def test_changing_value_of_forced_is_flaky():
    # A forced draw must force the same value on replay.
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1, forced=1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)
    data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.draw_bits(1, forced=0)


def test_does_not_truncate_if_unseen():
    # Unknown buffers pass through rewrite unchanged.
    tree = DataTree()
    b = bytes([1, 2, 3, 4])
    assert tree.rewrite(b) == (b, None)


def test_truncates_if_seen():
    # Known buffers are truncated to the recorded length with their status.
    tree = DataTree()
    b = bytes([1, 2, 3, 4])
    data = ConjectureData.for_buffer(b, observer=tree.new_observer())
    data.draw_bits(8)
    data.draw_bits(8)
    data.freeze()
    assert tree.rewrite(b) == (b[:2], Status.VALID)


def test_child_becomes_exhausted_after_split():
    # A child whose only continuation is forced becomes exhausted even
    # though the tree as a whole is not.
    tree = DataTree()
    data = ConjectureData.for_buffer([0, 0], observer=tree.new_observer())
    data.draw_bits(8)
    data.draw_bits(8, forced=0)
    data.freeze()
    data = ConjectureData.for_buffer([1, 0], observer=tree.new_observer())
    data.draw_bits(8)
    data.draw_bits(8)
    data.freeze()
    assert not tree.is_exhausted
    assert tree.root.transition.children[0].is_exhausted


def test_will_generate_novel_prefix_to_avoid_exhausted_branches():
    # generate_novel_prefix must steer away from the exhausted [1] branch.
    tree = DataTree()
    data = ConjectureData.for_buffer([1], observer=tree.new_observer())
    data.draw_bits(1)
    data.freeze()
    data = ConjectureData.for_buffer([0, 1], observer=tree.new_observer())
    data.draw_bits(1)
    data.draw_bits(8)
    data.freeze()
    prefix = list(tree.generate_novel_prefix(Random(0)))
    assert len(prefix) == 2
    assert prefix[0] == 0


def test_will_mark_changes_in_discard_as_flaky():
    # Changing example (start/stop) structure on replay is flaky.
    tree = DataTree()
    data = ConjectureData.for_buffer([1, 1], observer=tree.new_observer())
    data.start_example(10)
    data.draw_bits(1)
    data.stop_example()
    data.draw_bits(1)
    data.freeze()
    data = ConjectureData.for_buffer([1, 1], observer=tree.new_observer())
    data.start_example(10)
    data.draw_bits(1)
    with pytest.raises(Flaky):
        # (Excerpt truncated here; the rest of this test is outside this view.)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful