How to use the wrapped_test function in Hypothesis

The Python snippets below come from the Hypothesis library itself (core.py) and from projects that build on it, and show where wrapped_test is created, marked, and reused.
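
As a quick orientation before the snippets, here is a minimal sketch (not taken from the Hypothesis documentation) of where wrapped_test shows up from a user's point of view: the object you call in your test suite is the wrapped_test closure built inside given(), and it keeps the undecorated function reachable through its .hypothesis handle.

from hypothesis import given, strategies as st

@given(st.integers())
def test_addition_is_commutative(x):
    assert x + 0 == 0 + x

# The name now refers to the wrapped_test closure created inside given();
# the original, undecorated function is still reachable via the handle.
print(test_addition_is_commutative.hypothesis.inner_test)

# Calling the wrapped test with no arguments runs the whole property.
test_addition_is_commutative()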

core.py

Source: core.py (GitHub)


...
    When the user runs a subset of tests (e.g via ``pytest -k``), errors will
    only be reported for tests that actually ran.
    """

    def invalid(message):
        def wrapped_test(*arguments, **kwargs):
            raise InvalidArgument(message)

        wrapped_test.is_hypothesis_test = True
        return wrapped_test

    if not (given_arguments or given_kwargs):
        return invalid("given must be called with at least one argument")
    if given_arguments and any(
        [original_argspec.varargs, original_argspec.varkw, original_argspec.kwonlyargs]
    ):
        return invalid(
            "positional arguments to @given are not supported with varargs, "
            "varkeywords, or keyword-only arguments"
        )
    if len(given_arguments) > len(original_argspec.args):
        args = tuple(given_arguments)
        return invalid(
            "Too many positional arguments for %s() were passed to @given "
            "- expected at most %d arguments, but got %d %r"
            % (name, len(original_argspec.args), len(args), args)
        )
    if infer in given_arguments:
        return invalid(
            "infer was passed as a positional argument to @given, "
            "but may only be passed as a keyword argument"
        )
    if given_arguments and given_kwargs:
        return invalid("cannot mix positional and keyword arguments to @given")
    extra_kwargs = [
        k
        for k in given_kwargs
        if k not in original_argspec.args + original_argspec.kwonlyargs
    ]
    if extra_kwargs and not original_argspec.varkw:
        arg = extra_kwargs[0]
        return invalid(
            "%s() got an unexpected keyword argument %r, from `%s=%r` in @given"
            % (name, arg, arg, given_kwargs[arg])
        )
    if original_argspec.defaults or original_argspec.kwonlydefaults:
        return invalid("Cannot apply @given to a function with defaults.")
    missing = [repr(kw) for kw in original_argspec.kwonlyargs if kw not in given_kwargs]
    if missing:
        return invalid(
            "Missing required kwarg{}: {}".format(
                "s" if len(missing) > 1 else "", ", ".join(missing)
            )
        )

...

def get_random_for_wrapped_test(test, wrapped_test):
    settings = wrapped_test._hypothesis_internal_use_settings
    wrapped_test._hypothesis_internal_use_generated_seed = None

    if wrapped_test._hypothesis_internal_use_seed is not None:
        return Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        return Random(int_from_bytes(function_digest(test)))
    elif global_force_seed is not None:
        return Random(global_force_seed)
    else:
        seed = rnd_module.getrandbits(128)
        wrapped_test._hypothesis_internal_use_generated_seed = seed
        return Random(seed)


def process_arguments_to_given(wrapped_test, arguments, kwargs, given_kwargs, argspec):
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)
    arguments = tuple(arguments)

    # We use TupleStrategy over tuples() here to avoid polluting
    # st.STRATEGY_CACHE with references (see #493), and because this is
    # trivial anyway if the fixed_dictionaries strategy is cacheable.
    search_strategy = TupleStrategy(
        (
            st.just(arguments),
            st.fixed_dictionaries(given_kwargs).map(lambda args: dict(args, **kwargs)),
        )
    )

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)
    search_strategy.validate()
    return arguments, kwargs, test_runner, search_strategy

...

def new_given_argspec(original_argspec, given_kwargs):
    """Make an updated argspec for the wrapped test."""
    new_args = [a for a in original_argspec.args if a not in given_kwargs]
    new_kwonlyargs = [a for a in original_argspec.kwonlyargs if a not in given_kwargs]
    annots = {
        k: v
        for k, v in original_argspec.annotations.items()
        if k in new_args + new_kwonlyargs
    }
    annots["return"] = None
    return original_argspec._replace(
        args=new_args, kwonlyargs=new_kwonlyargs, annotations=annots
    )

...

@attr.s()
class HypothesisHandle:
    """This object is provided as the .hypothesis attribute on @given tests.

    Downstream users can reassign its attributes to insert custom logic into
    the execution of each case, for example by converting an async into a
    sync function.

    This must be an attribute of an attribute, because reassignment of a
    first-level attribute would not be visible to Hypothesis if the function
    had been decorated before the assignment.

    See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more
    information.
    """

    inner_test = attr.ib()
    _get_fuzz_target = attr.ib()
    _given_kwargs = attr.ib()

    @property
    def fuzz_one_input(
        self,
    ) -> Callable[[Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]]:
        """Run the test as a fuzz target, driven with the `buffer` of bytes.

        Returns None if buffer invalid for the strategy, canonical pruned
        bytes if the buffer was valid, and leaves raised exceptions alone.

        Note: this feature is experimental and may change or be removed.
        """
        # Note: most users, if they care about fuzzer performance, will access the
        # property and assign it to a local variable to move the attribute lookup
        # outside their fuzzing loop / before the fork point. We cache it anyway,
        # so that naive or unusual use-cases get the best possible performance too.
        try:
            return self.__cached_target  # type: ignore
        except AttributeError:
            self.__cached_target = self._get_fuzz_target()
            return self.__cached_target


def given(
    *_given_arguments: Union[SearchStrategy, InferType],
    **_given_kwargs: Union[SearchStrategy, InferType],
) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """A decorator for turning a test function that accepts arguments into a
    randomized test.

    This is the main entry point to Hypothesis.
    """

    def run_test_as_given(test):
        if inspect.isclass(test):
            # Provide a meaningful error to users, instead of exceptions from
            # internals that assume we're dealing with a function.
            raise InvalidArgument("@given cannot be applied to a class.")
        given_arguments = tuple(_given_arguments)
        given_kwargs = dict(_given_kwargs)
        original_argspec = getfullargspec(test)
        check_invalid = is_invalid_test(
            test.__name__, original_argspec, given_arguments, given_kwargs
        )

        # If the argument check found problems, return a dummy test function
        # that will raise an error if it is actually called.
        if check_invalid is not None:
            return check_invalid

        # Because the argument check succeeded, we can convert @given's
        # positional arguments into keyword arguments for simplicity.
        if given_arguments:
            assert not given_kwargs
            for name, strategy in zip(
                reversed(original_argspec.args), reversed(given_arguments)
            ):
                given_kwargs[name] = strategy
        # These have been converted, so delete them to prevent accidental use.
        del given_arguments

        argspec = new_given_argspec(original_argspec, given_kwargs)

        # Use type information to convert "infer" arguments into appropriate strategies.
        if infer in given_kwargs.values():
            hints = get_type_hints(test)
        for name in [name for name, value in given_kwargs.items() if value is infer]:
            if name not in hints:
                # As usual, we want to emit this error when the test is executed,
                # not when it's decorated.

                @impersonate(test)
                @define_function_signature(test.__name__, test.__doc__, argspec)
                def wrapped_test(*arguments, **kwargs):
                    __tracebackhide__ = True
                    raise InvalidArgument(
                        "passed %s=infer for %s, but %s has no type annotation"
                        % (name, test.__name__, name)
                    )

                return wrapped_test
            given_kwargs[name] = st.from_type(hints[name])

        @impersonate(test)
        @define_function_signature(test.__name__, test.__doc__, argspec)
        def wrapped_test(*arguments, **kwargs):
            # Tell pytest to omit the body of this function from tracebacks
            __tracebackhide__ = True

            test = wrapped_test.hypothesis.inner_test

            if getattr(test, "is_hypothesis_test", False):
                raise InvalidArgument(
                    (
                        "You have applied @given to the test %s more than once, which "
                        "wraps the test several times and is extremely slow. A "
                        "similar effect can be gained by combining the arguments "
                        "of the two calls to given. For example, instead of "
                        "@given(booleans()) @given(integers()), you could write "
                        "@given(booleans(), integers())"
                    )
                    % (test.__name__,)
                )

            settings = wrapped_test._hypothesis_internal_use_settings
            random = get_random_for_wrapped_test(test, wrapped_test)

            processed_args = process_arguments_to_given(
                wrapped_test, arguments, kwargs, given_kwargs, argspec
            )
            arguments, kwargs, test_runner, search_strategy = processed_args
...
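
The invalid() helper at the top of this excerpt explains why a misused @given does not fail at import time: the decorator returns a stub wrapped_test that only raises once the test is actually executed, so runs filtered with pytest -k only report errors for tests that ran. A small illustrative sketch of that behaviour (pytest.raises is used here only for brevity and is not part of the snippet above):

import pytest
from hypothesis import given
from hypothesis.errors import InvalidArgument

@given()  # no strategies at all, so is_invalid_test() returns the stub wrapped_test
def test_forgot_the_strategy(x):
    pass

# Importing/collecting the test succeeds; the error surfaces only when the stub runs.
with pytest.raises(InvalidArgument, match="at least one argument"):
    test_forgot_the_strategy()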


_hypothesis.py

Source: _hypothesis.py (GitHub)


1"""Provide strategies for given endpoint(s) definition."""2import asyncio3from typing import Any, Callable, Dict, List, Optional, Tuple, Union4import hypothesis5from hypothesis import strategies as st6from hypothesis.strategies import SearchStrategy7from hypothesis.utils.conventions import InferType8from .constants import DEFAULT_DEADLINE, DataGenerationMethod9from .exceptions import InvalidSchema10from .hooks import GLOBAL_HOOK_DISPATCHER, HookContext, HookDispatcher11from .models import Case, Endpoint12from .stateful import Feedback, Stateful13GivenInput = Union[SearchStrategy, InferType]14def create_test(15 *,16 endpoint: Endpoint,17 test: Callable,18 settings: Optional[hypothesis.settings] = None,19 seed: Optional[int] = None,20 data_generation_method: DataGenerationMethod = DataGenerationMethod.default(),21 _given_args: Tuple[GivenInput, ...] = (),22 _given_kwargs: Optional[Dict[str, GivenInput]] = None,23) -> Callable:24 """Create a Hypothesis test."""25 hook_dispatcher = getattr(test, "_schemathesis_hooks", None)26 feedback: Optional[Feedback]27 if endpoint.schema.stateful == Stateful.links:28 feedback = Feedback(endpoint.schema.stateful, endpoint)29 else:30 feedback = None31 strategy = endpoint.as_strategy(32 hooks=hook_dispatcher, feedback=feedback, data_generation_method=data_generation_method33 )34 _given_kwargs = (_given_kwargs or {}).copy()35 _given_kwargs.setdefault("case", strategy)36 wrapped_test = hypothesis.given(*_given_args, **_given_kwargs)(test)37 if seed is not None:38 wrapped_test = hypothesis.seed(seed)(wrapped_test)39 if asyncio.iscoroutinefunction(test):40 wrapped_test.hypothesis.inner_test = make_async_test(test) # type: ignore41 setup_default_deadline(wrapped_test)42 if settings is not None:43 wrapped_test = settings(wrapped_test)44 wrapped_test._schemathesis_feedback = feedback # type: ignore45 return add_examples(wrapped_test, endpoint, hook_dispatcher=hook_dispatcher)46def setup_default_deadline(wrapped_test: Callable) -> None:47 # Quite hacky, but it is the simplest way to set up the default deadline value without affecting non-Schemathesis48 # tests globally49 existing_settings = getattr(wrapped_test, "_hypothesis_internal_use_settings", None)50 if existing_settings is not None and existing_settings.deadline == hypothesis.settings.default.deadline:51 new_settings = hypothesis.settings(existing_settings, deadline=DEFAULT_DEADLINE)52 wrapped_test._hypothesis_internal_use_settings = new_settings # type: ignore53def make_async_test(test: Callable) -> Callable:54 def async_run(*args: Any, **kwargs: Any) -> None:55 loop = asyncio.get_event_loop()56 coro = test(*args, **kwargs)57 future = asyncio.ensure_future(coro, loop=loop)58 loop.run_until_complete(future)59 return async_run60def add_examples(test: Callable, endpoint: Endpoint, hook_dispatcher: Optional[HookDispatcher] = None) -> Callable:61 """Add examples to the Hypothesis test, if they are specified in the schema."""62 try:63 examples: List[Case] = [get_single_example(strategy) for strategy in endpoint.get_strategies_from_examples()]64 except InvalidSchema:65 # In this case, the user didn't pass `--validate-schema=false` and see an error in the output anyway,66 # and no tests will be executed. 
For this reason, examples can be skipped67 return test68 context = HookContext(endpoint) # context should be passed here instead69 GLOBAL_HOOK_DISPATCHER.dispatch("before_add_examples", context, examples)70 endpoint.schema.hooks.dispatch("before_add_examples", context, examples)71 if hook_dispatcher:72 hook_dispatcher.dispatch("before_add_examples", context, examples)73 for example in examples:74 test = hypothesis.example(case=example)(test)75 return test76def get_single_example(strategy: st.SearchStrategy[Case]) -> Case:77 @hypothesis.given(strategy) # type: ignore78 @hypothesis.settings( # type: ignore79 database=None,80 max_examples=1,81 deadline=None,82 verbosity=hypothesis.Verbosity.quiet,83 phases=(hypothesis.Phase.generate,),84 suppress_health_check=hypothesis.HealthCheck.all(),85 )86 def example_generating_inner_function(ex: Case) -> None:87 examples.append(ex)88 examples: List[Case] = []89 example_generating_inner_function()...
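
create_test() above (which appears to come from Schemathesis) relies on the behaviour documented in HypothesisHandle: reassigning wrapped_test.hypothesis.inner_test is visible to Hypothesis, so a coroutine test can be swapped for a synchronous runner without re-decorating. Below is a hedged, standalone sketch of the same pattern, with asyncio.run substituted for the snippet's explicit event-loop handling:

import asyncio

from hypothesis import given, strategies as st

@given(st.integers(min_value=0, max_value=10))
async def test_async_property(x):
    await asyncio.sleep(0)
    assert x >= 0

def make_async_test(test):
    # Same idea as make_async_test() above, using asyncio.run for simplicity.
    def async_run(*args, **kwargs):
        asyncio.run(test(*args, **kwargs))

    return async_run

# Keep the wrapped_test object itself; only replace the inner test it drives.
test_async_property.hypothesis.inner_test = make_async_test(
    test_async_property.hypothesis.inner_test
)
test_async_property()  # each generated example now runs the coroutine to completion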


test_decorators.py

Source: test_decorators.py (GitHub)


...
    pass


def unwrapped_forall(self):
    pass


@pysimtest.test(run=True)
def wrapped_test(self):
    pass


def unwrapped_test(self):
    pass


@pysimtest.test(run=False)
def no_run_test(self):
    pass


class TestDecorators:

    def test_forall_function_attr(self):
        assert hasattr(wrapped_forall, "is_forall")

    def test_forall_no_function_attr(self):
        assert not hasattr(unwrapped_forall, "is_forall")

    def test_test_function_attr(self):
        assert hasattr(wrapped_test, "is_test")

    def test_test_no_function_attr(self):
        assert not hasattr(unwrapped_test, "test")

    def test_test_run_attr(self):
...
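
This last snippet tests pysimtest's decorator by checking the marker attributes it sets; Hypothesis marks its own wrapped tests the same way, via the is_hypothesis_test attribute seen in core.py above. A comparable, hypothetical check written against Hypothesis might look like this:

from hypothesis import given, strategies as st

@given(st.booleans())
def wrapped_test(value):
    assert value in (True, False)

def unwrapped_test():
    pass

def test_given_marks_the_wrapper():
    assert getattr(wrapped_test, "is_hypothesis_test", False)
    assert not hasattr(unwrapped_test, "is_hypothesis_test")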


