How to use the _log_phase_statistics method in Hypothesis

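_log_phase_statistics is a private helper on ConjectureRunner, the class in hypothesis/internal/conjecture/engine.py that drives a Hypothesis test run. It is a context manager (note the @contextmanager decorator) that _run() wraps around each phase of the run: on exit it stores the phase's duration, the per-test-case statistics gathered while the phase ran, the number of distinct failures found, and the number of successful shrinks under a "<phase>-phase" key in self.statistics. Because it is internal API, you normally never call it yourself; its output reaches you through Hypothesis's statistics reporting. For example, given an ordinary property-based test:

from hypothesis import given, strategies as st

# Any property-based test will do; this one is purely for illustration.
@given(st.lists(st.integers()))
def test_reversing_twice_is_a_no_op(xs):
    assert list(reversed(list(reversed(xs)))) == xs

Running it with pytest --hypothesis-show-statistics prints a summary for each phase (reuse, generate, shrink); those per-phase entries are exactly what _log_phase_statistics records in the source below.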

engine.py

Source: engine.py (Hypothesis internals, GitHub)


...
        self.__pending_call_explanation = explanation

    def clear_call_explanation(self):
        self.__pending_call_explanation = None

    @contextmanager
    def _log_phase_statistics(self, phase):
        self.stats_per_test_case.clear()
        start_time = time.perf_counter()
        try:
            yield
        finally:
            self.statistics[phase + "-phase"] = {
                "duration-seconds": time.perf_counter() - start_time,
                "test-cases": list(self.stats_per_test_case),
                "distinct-failures": len(self.interesting_examples),
                "shrinks-successful": self.shrinks,
            }

    @property
    def should_optimise(self):
        return Phase.target in self.settings.phases

    def __tree_is_exhausted(self):
        return self.tree.is_exhausted

    def __stoppable_test_function(self, data):
        """Run ``self._test_function``, but convert a ``StopTest`` exception
        into a normal return and avoid raising Flaky for RecursionErrors.
        """
        depth = stack_depth_of_caller()
        # Because we add to the recursion limit, to be good citizens we also add
        # a check for unbounded recursion. The default limit is 1000, so this can
        # only ever trigger if something really strange is happening and it's hard
        # to imagine an intentionally-deeply-recursive use of this code.
        assert depth <= 1000, (
            "Hypothesis would usually add %d to the stack depth of %d here, "
            "but we are already much deeper than expected. Aborting now, to "
            "avoid extending the stack limit in an infinite loop..."
            % (self.__recursion_limit, depth)
        )
        try:
            sys.setrecursionlimit(depth + self.__recursion_limit)
            self._test_function(data)
        except StopTest as e:
            if e.testcounter == data.testcounter:
                # This StopTest has successfully stopped its test, and can now
                # be discarded.
                pass
            else:
                # This StopTest was raised by a different ConjectureData. We
                # need to re-raise it so that it will eventually reach the
                # correct engine.
                raise
        finally:
            sys.setrecursionlimit(self.__recursion_limit)

    def test_function(self, data):
        if self.__pending_call_explanation is not None:
            self.debug(self.__pending_call_explanation)
            self.__pending_call_explanation = None

        assert isinstance(data.observer, TreeRecordingObserver)
        self.call_count += 1

        interrupted = False
        try:
            self.__stoppable_test_function(data)
        except KeyboardInterrupt:
            interrupted = True
            raise
        except BaseException:
            self.save_buffer(data.buffer)
            raise
        finally:
            # No branch, because if we're interrupted we always raise
            # the KeyboardInterrupt, never continue to the code below.
            if not interrupted:  # pragma: no branch
                data.freeze()
                call_stats = {
                    "status": data.status.name.lower(),
                    "runtime": data.finish_time - data.start_time,
                    "drawtime": math.fsum(data.draw_times),
                    "events": sorted({self.event_to_string(e) for e in data.events}),
                }
                self.stats_per_test_case.append(call_stats)
                self.__data_cache[data.buffer] = data.as_result()
                self.debug_data(data)

        if self.pareto_front is not None and self.pareto_front.add(data.as_result()):
            self.save_buffer(data.buffer, sub_key=b"pareto")

        assert len(data.buffer) <= BUFFER_SIZE

        if data.status >= Status.VALID:
            for k, v in data.target_observations.items():
                self.best_observed_targets[k] = max(self.best_observed_targets[k], v)

                if k not in self.best_examples_of_observed_targets:
                    self.best_examples_of_observed_targets[k] = data.as_result()
                    continue

                existing_example = self.best_examples_of_observed_targets[k]
                existing_score = existing_example.target_observations[k]

                if v < existing_score:
                    continue

                if v > existing_score or sort_key(data.buffer) < sort_key(
                    existing_example.buffer
                ):
                    self.best_examples_of_observed_targets[k] = data.as_result()

        if data.status == Status.VALID:
            self.valid_examples += 1

        if data.status == Status.INTERESTING:
            key = data.interesting_origin
            changed = False
            try:
                existing = self.interesting_examples[key]
            except KeyError:
                changed = True
                self.last_bug_found_at = self.call_count
                if self.first_bug_found_at is None:
                    self.first_bug_found_at = self.call_count
            else:
                if sort_key(data.buffer) < sort_key(existing.buffer):
                    self.shrinks += 1
                    self.downgrade_buffer(existing.buffer)
                    self.__data_cache.unpin(existing.buffer)
                    changed = True

            if changed:
                self.save_buffer(data.buffer)
                self.interesting_examples[key] = data.as_result()
                self.__data_cache.pin(data.buffer)
                self.shrunk_examples.discard(key)

            if self.shrinks >= MAX_SHRINKS:
                self.exit_with(ExitReason.max_shrinks)

            if (
                not self.ignore_limits
                and self.finish_shrinking_deadline is not None
                and self.finish_shrinking_deadline < time.perf_counter()
            ):
                # See https://github.com/HypothesisWorks/hypothesis/issues/2340
                report(
                    "WARNING: Hypothesis has spent more than five minutes working to shrink "
                    "a failing example, and stopped because it is making very slow "
                    "progress. When you re-run your tests, shrinking will resume and "
                    "may take this long before aborting again.\n"
                    "PLEASE REPORT THIS if you can provide a reproducing example, so that "
                    "we can improve shrinking performance for everyone."
                )
                self.exit_with(ExitReason.very_slow_shrinking)

        if not self.interesting_examples:
            # Note that this logic is reproduced to end the generation phase when
            # we have interesting examples. Update that too if you change this!
            # (The doubled implementation is because here we exit the engine entirely,
            # while in the other case below we just want to move on to shrinking.)
            if self.valid_examples >= self.settings.max_examples:
                self.exit_with(ExitReason.max_examples)
            if self.call_count >= max(
                self.settings.max_examples * 10,
                # We have a high-ish default max iterations, so that tests
                # don't become flaky when max_examples is too low.
                1000,
            ):
                self.exit_with(ExitReason.max_iterations)

        if self.__tree_is_exhausted():
            self.exit_with(ExitReason.finished)

        self.record_for_health_check(data)

    def on_pareto_evict(self, data):
        self.settings.database.delete(self.pareto_key, data.buffer)

    def generate_novel_prefix(self):
        """Uses the tree to proactively generate a starting sequence of bytes
        that we haven't explored yet for this test.

        When this method is called, we assume that there must be at
        least one novel prefix left to find. If there were not, then the
        test run should have already stopped due to tree exhaustion.
        """
        return self.tree.generate_novel_prefix(self.random)

    def record_for_health_check(self, data):
        # Once we've actually found a bug, there's no point in trying to run
        # health checks - they'll just mask the actually important information.
        if data.status == Status.INTERESTING:
            self.health_check_state = None

        state = self.health_check_state

        if state is None:
            return

        state.draw_times.extend(data.draw_times)

        if data.status == Status.VALID:
            state.valid_examples += 1
        elif data.status == Status.INVALID:
            state.invalid_examples += 1
        else:
            assert data.status == Status.OVERRUN
            state.overrun_examples += 1

        max_valid_draws = 10
        max_invalid_draws = 50
        max_overrun_draws = 20

        assert state.valid_examples <= max_valid_draws

        if state.valid_examples == max_valid_draws:
            self.health_check_state = None
            return

        if state.overrun_examples == max_overrun_draws:
            fail_health_check(
                self.settings,
                (
                    "Examples routinely exceeded the max allowable size. "
                    "(%d examples overran while generating %d valid ones)"
                    ". Generating examples this large will usually lead to"
                    " bad results. You could try setting max_size parameters "
                    "on your collections and turning "
                    "max_leaves down on recursive() calls."
                )
                % (state.overrun_examples, state.valid_examples),
                HealthCheck.data_too_large,
            )
        if state.invalid_examples == max_invalid_draws:
            fail_health_check(
                self.settings,
                (
                    "It looks like your strategy is filtering out a lot "
                    "of data. Health check found %d filtered examples but "
                    "only %d good ones. This will make your tests much "
                    "slower, and also will probably distort the data "
                    "generation quite a lot. You should adapt your "
                    "strategy to filter less. This can also be caused by "
                    "a low max_leaves parameter in recursive() calls"
                )
                % (state.invalid_examples, state.valid_examples),
                HealthCheck.filter_too_much,
            )

        draw_time = sum(state.draw_times)

        if draw_time > 1.0:
            fail_health_check(
                self.settings,
                (
                    "Data generation is extremely slow: Only produced "
                    "%d valid examples in %.2f seconds (%d invalid ones "
                    "and %d exceeded maximum size). Try decreasing "
                    "size of the data you're generating (with e.g. "
                    "max_size or max_leaves parameters)."
                )
                % (
                    state.valid_examples,
                    draw_time,
                    state.invalid_examples,
                    state.overrun_examples,
                ),
                HealthCheck.too_slow,
            )

    def save_buffer(self, buffer, sub_key=None):
        if self.settings.database is not None:
            key = self.sub_key(sub_key)
            if key is None:
                return
            self.settings.database.save(key, bytes(buffer))

    def downgrade_buffer(self, buffer):
        if self.settings.database is not None and self.database_key is not None:
            self.settings.database.move(self.database_key, self.secondary_key, buffer)

    def sub_key(self, sub_key):
        if self.database_key is None:
            return None
        if sub_key is None:
            return self.database_key
        return b".".join((self.database_key, sub_key))

    @property
    def secondary_key(self):
        return self.sub_key(b"secondary")

    @property
    def pareto_key(self):
        return self.sub_key(b"pareto")

    def debug(self, message):
        if self.settings.verbosity >= Verbosity.debug:
            base_report(message)

    @property
    def report_debug_info(self):
        return self.settings.verbosity >= Verbosity.debug

    def debug_data(self, data):
        if not self.report_debug_info:
            return

        stack = [[]]

        def go(ex):
            if ex.length == 0:
                return
            if len(ex.children) == 0:
                stack[-1].append(int_from_bytes(data.buffer[ex.start : ex.end]))
            else:
                node = []
                stack.append(node)

                for v in ex.children:
                    go(v)
                stack.pop()
                if len(node) == 1:
                    stack[-1].extend(node)
                else:
                    stack[-1].append(node)

        go(data.examples[0])

        assert len(stack) == 1

        status = repr(data.status)

        if data.status == Status.INTERESTING:
            status = "%s (%r)" % (status, data.interesting_origin)

        self.debug(
            "%d bytes %r -> %s, %s" % (data.index, stack[0], status, data.output)
        )

    def run(self):
        with local_settings(self.settings):
            try:
                self._run()
            except RunIsComplete:
                pass
            for v in self.interesting_examples.values():
                self.debug_data(v)
            self.debug(
                "Run complete after %d examples (%d valid) and %d shrinks"
                % (self.call_count, self.valid_examples, self.shrinks)
            )

    @property
    def database(self):
        if self.database_key is None:
            return None
        return self.settings.database

    def has_existing_examples(self):
        return self.database is not None and Phase.reuse in self.settings.phases

    def reuse_existing_examples(self):
        """If appropriate (we have a database and have been told to use it),
        try to reload existing examples from the database.

        If there are a lot we don't try all of them. We always try the
        smallest example in the database (which is guaranteed to be the
        last failure) and the largest (which is usually the seed example
        which the last failure came from but we don't enforce that). We
        then take a random sampling of the remainder and try those. Any
        examples that are no longer interesting are cleared out.
        """
        if self.has_existing_examples():
            self.debug("Reusing examples from database")
            # We have to do some careful juggling here. We have two database
            # corpora: The primary and secondary. The primary corpus is a
            # small set of minimized examples each of which has at one point
            # demonstrated a distinct bug. We want to retry all of these.
            # We also have a secondary corpus of examples that have at some
            # point demonstrated interestingness (currently only ones that
            # were previously non-minimal examples of a bug, but this will
            # likely expand in future). These are a good source of potentially
            # interesting examples, but there are a lot of them, so we down
            # sample the secondary corpus to a more manageable size.
            corpus = sorted(
                self.settings.database.fetch(self.database_key), key=sort_key
            )
            factor = 0.1 if (Phase.generate in self.settings.phases) else 1
            desired_size = max(2, ceil(factor * self.settings.max_examples))

            if len(corpus) < desired_size:
                extra_corpus = list(self.settings.database.fetch(self.secondary_key))

                shortfall = desired_size - len(corpus)

                if len(extra_corpus) <= shortfall:
                    extra = extra_corpus
                else:
                    extra = self.random.sample(extra_corpus, shortfall)
                extra.sort(key=sort_key)
                corpus.extend(extra)

            for existing in corpus:
                data = self.cached_test_function(existing)
                if data.status != Status.INTERESTING:
                    self.settings.database.delete(self.database_key, existing)
                    self.settings.database.delete(self.secondary_key, existing)

            # If we've not found any interesting examples so far we try some of
            # the pareto front from the last run.
            if len(corpus) < desired_size and not self.interesting_examples:
                desired_extra = desired_size - len(corpus)
                pareto_corpus = list(self.settings.database.fetch(self.pareto_key))
                if len(pareto_corpus) > desired_extra:
                    pareto_corpus = self.random.sample(pareto_corpus, desired_extra)
                pareto_corpus.sort(key=sort_key)

                for existing in pareto_corpus:
                    data = self.cached_test_function(existing)
                    if data not in self.pareto_front:
                        self.settings.database.delete(self.pareto_key, existing)
                    if data.status == Status.INTERESTING:
                        break

    def exit_with(self, reason):
        if self.ignore_limits:
            return
        self.statistics["stopped-because"] = reason.describe(self.settings)
        if self.best_observed_targets:
            self.statistics["targets"] = dict(self.best_observed_targets)
        self.debug("exit_with(%s)" % (reason.name,))
        self.exit_reason = reason
        raise RunIsComplete()

    def should_generate_more(self):
        # End the generation phase where we would have ended it if no bugs had
        # been found. This reproduces the exit logic in `self.test_function`,
        # but with the important distinction that this clause will move on to
        # the shrinking phase having found one or more bugs, while the other
        # will exit having found zero bugs.
        if self.valid_examples >= self.settings.max_examples or self.call_count >= max(
            self.settings.max_examples * 10, 1000
        ):  # pragma: no cover
            return False

        # If we haven't found a bug, keep looking - if we hit any limits on
        # the number of tests to run that will raise an exception and stop
        # the run.
        if not self.interesting_examples:
            return True
        # If we've found a bug and won't report more than one, stop looking.
        elif not self.settings.report_multiple_bugs:
            return False
        assert self.first_bug_found_at <= self.last_bug_found_at <= self.call_count
        # Otherwise, keep searching for between ten and 'a heuristic' calls.
        # We cap 'calls after first bug' so errors are reported reasonably
        # soon even for tests that are allowed to run for a very long time,
        # or sooner if the latest half of our test effort has been fruitless.
        return self.call_count < MIN_TEST_CALLS or self.call_count < min(
            self.first_bug_found_at + 1000, self.last_bug_found_at * 2
        )

    def generate_new_examples(self):
        if Phase.generate not in self.settings.phases:
            return
        if self.interesting_examples:
            # The example database has failing examples from a previous run,
            # so we'd rather report that they're still failing ASAP than take
            # the time to look for additional failures.
            return

        self.debug("Generating new examples")

        assert self.should_generate_more()
        zero_data = self.cached_test_function(bytes(BUFFER_SIZE))
        if zero_data.status > Status.OVERRUN:
            self.__data_cache.pin(zero_data.buffer)

        if zero_data.status == Status.OVERRUN or (
            zero_data.status == Status.VALID and len(zero_data.buffer) * 2 > BUFFER_SIZE
        ):
            fail_health_check(
                self.settings,
                "The smallest natural example for your test is extremely "
                "large. This makes it difficult for Hypothesis to generate "
                "good examples, especially when trying to reduce failing ones "
                "at the end. Consider reducing the size of your data if it is "
                "of a fixed size. You could also fix this by improving how "
                "your data shrinks (see https://hypothesis.readthedocs.io/en/"
                "latest/data.html#shrinking for details), or by introducing "
                "default values inside your strategy. e.g. could you replace "
                "some arguments with their defaults by using "
                "one_of(none(), some_complex_strategy)?",
                HealthCheck.large_base_example,
            )

        self.health_check_state = HealthCheckState()

        # We attempt to use the size of the minimal generated test case starting
        # from a given novel prefix as a guideline to generate smaller test
        # cases for an initial period, by restricting ourselves to test cases
        # that are not much larger than it.
        #
        # Calculating the actual minimal generated test case is hard, so we
        # take a best guess that zero extending a prefix produces the minimal
        # test case starting with that prefix (this is true for our built in
        # strategies). This is only a reasonable thing to do if the resulting
        # test case is valid. If we regularly run into situations where it is
        # not valid then this strategy is a waste of time, so we want to
        # abandon it early. In order to do this we track how many times in a
        # row it has failed to work, and abort small test case generation when
        # it has failed too many times in a row.
        consecutive_zero_extend_is_invalid = 0

        # We control growth during initial example generation, for two
        # reasons:
        #
        # * It gives us an opportunity to find small examples early, which
        #   gives us a fast path for easy to find bugs.
        # * It avoids low probability events where we might end up
        #   generating very large examples during health checks, which
        #   on slower machines can trigger HealthCheck.too_slow.
        #
        # The heuristic we use is that we attempt to estimate the smallest
        # extension of this prefix, and limit the size to no more than
        # an order of magnitude larger than that. If we fail to estimate
        # the size accurately, we skip over this prefix and try again.
        #
        # We need to tune the example size based on the initial prefix,
        # because any fixed size might be too small, and any size based
        # on the strategy in general can fall afoul of strategies that
        # have very different sizes for different prefixes.
        small_example_cap = clamp(10, self.settings.max_examples // 10, 50)

        optimise_at = max(self.settings.max_examples // 2, small_example_cap + 1)
        ran_optimisations = False

        while self.should_generate_more():
            prefix = self.generate_novel_prefix()
            assert len(prefix) <= BUFFER_SIZE
            if (
                self.valid_examples <= small_example_cap
                and self.call_count <= 5 * small_example_cap
                and not self.interesting_examples
                and consecutive_zero_extend_is_invalid < 5
            ):
                minimal_example = self.cached_test_function(
                    prefix + bytes(BUFFER_SIZE - len(prefix))
                )

                if minimal_example.status < Status.VALID:
                    consecutive_zero_extend_is_invalid += 1
                    continue

                consecutive_zero_extend_is_invalid = 0

                minimal_extension = len(minimal_example.buffer) - len(prefix)

                max_length = min(len(prefix) + minimal_extension * 10, BUFFER_SIZE)

                # We could end up in a situation where even though the prefix was
                # novel when we generated it, because we've now tried zero extending
                # it not all possible continuations of it will be novel. In order to
                # avoid making redundant test calls, we rerun it in simulation mode
                # first. If this has a predictable result, then we don't bother
                # running the test function for real here. If however we encounter
                # some novel behaviour, we try again with the real test function,
                # starting from the new novel prefix that it has discovered.
                try:
                    trial_data = self.new_conjecture_data(
                        prefix=prefix, max_length=max_length
                    )
                    self.tree.simulate_test_function(trial_data)
                    continue
                except PreviouslyUnseenBehaviour:
                    pass

                # If the simulation entered part of the tree that has been killed,
                # we don't want to run this.
                if trial_data.observer.killed:
                    continue

                # We might have hit the cap on number of examples we should
                # run when calculating the minimal example.
                if not self.should_generate_more():
                    break
                prefix = trial_data.buffer
            else:
                max_length = BUFFER_SIZE
            data = self.new_conjecture_data(prefix=prefix, max_length=max_length)
            self.test_function(data)
            self.generate_mutations_from(data)

            # Although the optimisations are logically a distinct phase, we
            # actually normally run them as part of example generation. The
            # reason for this is that we cannot guarantee that optimisation
            # actually exhausts our budget: It might finish running and we
            # discover that actually we still could run a bunch more test cases
            # if we want.
            if (
                self.valid_examples >= max(small_example_cap, optimise_at)
                and not ran_optimisations
            ):
                ran_optimisations = True
                self.optimise_targets()

    def generate_mutations_from(self, data):
        # A thing that is often useful but rarely happens by accident is
        # to generate the same value at multiple different points in the
        # test case.
        #
        # Rather than make this the responsibility of individual strategies
        # we implement a small mutator that just takes parts of the test
        # case with the same label and tries replacing one of them with a
        # copy of the other and tries running it. If we've made a good
        # guess about what to put where, this will run a similar generated
        # test case with more duplication.
        if (
            # An OVERRUN doesn't have enough information about the test
            # case to mutate, so we just skip those.
            data.status >= Status.INVALID
            # This has a tendency to trigger some weird edge cases during
            # generation so we don't let it run until we're done with the
            # health checks.
            and self.health_check_state is None
        ):
            initial_calls = self.call_count
            failed_mutations = 0
            while (
                self.should_generate_more()
                # We implement fairly conservative checks for how long we
                # should run mutation for, as it's generally not obvious
                # how helpful it is for any given test case.
                and self.call_count <= initial_calls + 5
                and failed_mutations <= 5
            ):
                groups = data.examples.mutator_groups
                if not groups:
                    break

                group = self.random.choice(groups)

                ex1, ex2 = [
                    data.examples[i] for i in sorted(self.random.sample(group, 2))
                ]
                assert ex1.end <= ex2.start

                replacements = [data.buffer[e.start : e.end] for e in [ex1, ex2]]

                replacement = self.random.choice(replacements)

                try:
                    # We attempt to replace both of the examples with
                    # whichever choice we made. Note that this might end
                    # up messing up and getting the example boundaries
                    # wrong - labels matching are only a best guess as to
                    # whether the two are equivalent - but it doesn't
                    # really matter. It may not achieve the desired result,
                    # but it's still a perfectly acceptable choice sequence
                    # to try.
                    new_data = self.cached_test_function(
                        data.buffer[: ex1.start]
                        + replacement
                        + data.buffer[ex1.end : ex2.start]
                        + replacement
                        + data.buffer[ex2.end :],
                        # We set error_on_discard so that we don't end up
                        # entering parts of the tree we consider redundant
                        # and not worth exploring.
                        error_on_discard=True,
                        extend=BUFFER_SIZE,
                    )
                except ContainsDiscard:
                    failed_mutations += 1
                    continue

                if (
                    new_data.status >= data.status
                    and data.buffer != new_data.buffer
                    and all(
                        k in new_data.target_observations
                        and new_data.target_observations[k] >= v
                        for k, v in data.target_observations.items()
                    )
                ):
                    data = new_data
                    failed_mutations = 0
                else:
                    failed_mutations += 1

    def optimise_targets(self):
        """If any target observations have been made, attempt to optimise them
        all."""
        if not self.should_optimise:
            return
        from hypothesis.internal.conjecture.optimiser import Optimiser

        # We want to avoid running the optimiser for too long in case we hit
        # an unbounded target score. We start this off fairly conservatively
        # in case interesting examples are easy to find and then ramp it up
        # on an exponential schedule so we don't hamper the optimiser too much
        # if it needs a long time to find good enough improvements.
        max_improvements = 10
        while True:
            prev_calls = self.call_count

            any_improvements = False

            for target, data in list(self.best_examples_of_observed_targets.items()):
                optimiser = Optimiser(
                    self, data, target, max_improvements=max_improvements
                )

                optimiser.run()
                if optimiser.improvements > 0:
                    any_improvements = True

            if self.interesting_examples:
                break

            max_improvements *= 2

            if any_improvements:
                continue

            self.pareto_optimise()

            if prev_calls == self.call_count:
                break

    def pareto_optimise(self):
        if self.pareto_front is not None:
            ParetoOptimiser(self).run()

    def _run(self):
        with self._log_phase_statistics("reuse"):
            self.reuse_existing_examples()
        with self._log_phase_statistics("generate"):
            self.generate_new_examples()
        # We normally run the targeting phase mixed in with the generate phase,
        # but if we've been asked to run it but not generation then we have to
        # run it explicitly on its own here.
        if Phase.generate not in self.settings.phases:
            self.optimise_targets()
        with self._log_phase_statistics("shrink"):
            self.shrink_interesting_examples()
        self.exit_with(ExitReason.finished)

    def new_conjecture_data(self, prefix, max_length=BUFFER_SIZE, observer=None):
        return ConjectureData(
            prefix=prefix,
            max_length=max_length,
            random=self.random,
            observer=observer or self.tree.new_observer(),
        )

    def new_conjecture_data_for_buffer(self, buffer):
        return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer())

    def shrink_interesting_examples(self):
        """If we've found interesting examples, try to replace each of them
        with a minimal interesting example with the same interesting_origin. ...
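
The pattern itself is easy to reuse outside Hypothesis: a contextmanager that times a named phase and snapshots accumulated per-case statistics in a finally block, so the entry is recorded even when the phase exits by raising. Below is a minimal self-contained sketch of that idea; PhaseLogger and log_phase are illustrative names, not part of Hypothesis's API:

import time
from contextlib import contextmanager


class PhaseLogger:
    """Sketch of the _log_phase_statistics pattern (illustrative only)."""

    def __init__(self):
        self.statistics = {}
        self.stats_per_test_case = []

    @contextmanager
    def log_phase(self, phase):
        # Start each phase with a clean per-case log, as the engine does.
        self.stats_per_test_case.clear()
        start_time = time.perf_counter()
        try:
            yield
        finally:
            # Record in `finally` so the statistics survive an exceptional exit.
            self.statistics[phase + "-phase"] = {
                "duration-seconds": time.perf_counter() - start_time,
                "test-cases": list(self.stats_per_test_case),
            }


logger = PhaseLogger()
with logger.log_phase("generate"):
    logger.stats_per_test_case.append({"status": "valid"})
print(logger.statistics["generate-phase"])

The finally block is the important design choice: in the engine source above, _run() leaves its phases by raising RunIsComplete (via exit_with), so recording only after a successful yield would silently drop the statistics for the last phase of every run.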
