Best Python code snippet using hypothesis
core.py
Source:core.py  
...780        # so that naive or unusual use-cases get the best possible performance too.781        try:782            return self.__cached_target  # type: ignore783        except AttributeError:784            self.__cached_target = self._get_fuzz_target()785            return self.__cached_target786def given(787    *_given_arguments: Union[SearchStrategy, InferType],788    **_given_kwargs: Union[SearchStrategy, InferType],789) -> Callable[[Callable[..., None]], Callable[..., None]]:790    """A decorator for turning a test function that accepts arguments into a791    randomized test.792    This is the main entry point to Hypothesis.793    """794    def run_test_as_given(test):795        if inspect.isclass(test):796            # Provide a meaningful error to users, instead of exceptions from797            # internals that assume we're dealing with a function.798            raise InvalidArgument("@given cannot be applied to a class.")799        given_arguments = tuple(_given_arguments)800        given_kwargs = dict(_given_kwargs)801        original_argspec = getfullargspec(test)802        check_invalid = is_invalid_test(803            test.__name__, original_argspec, given_arguments, given_kwargs804        )805        # If the argument check found problems, return a dummy test function806        # that will raise an error if it is actually called.807        if check_invalid is not None:808            return check_invalid809        # Because the argument check succeeded, we can convert @given's810        # positional arguments into keyword arguments for simplicity.811        if given_arguments:812            assert not given_kwargs813            for name, strategy in zip(814                reversed(original_argspec.args), reversed(given_arguments)815            ):816                given_kwargs[name] = strategy817        # These have been converted, so delete them to prevent accidental use.818        del given_arguments819        argspec = 
new_given_argspec(original_argspec, given_kwargs)820        # Use type information to convert "infer" arguments into appropriate strategies.821        if infer in given_kwargs.values():822            hints = get_type_hints(test)823        for name in [name for name, value in given_kwargs.items() if value is infer]:824            if name not in hints:825                # As usual, we want to emit this error when the test is executed,826                # not when it's decorated.827                @impersonate(test)828                @define_function_signature(test.__name__, test.__doc__, argspec)829                def wrapped_test(*arguments, **kwargs):830                    __tracebackhide__ = True831                    raise InvalidArgument(832                        "passed %s=infer for %s, but %s has no type annotation"833                        % (name, test.__name__, name)834                    )835                return wrapped_test836            given_kwargs[name] = st.from_type(hints[name])837        @impersonate(test)838        @define_function_signature(test.__name__, test.__doc__, argspec)839        def wrapped_test(*arguments, **kwargs):840            # Tell pytest to omit the body of this function from tracebacks841            __tracebackhide__ = True842            test = wrapped_test.hypothesis.inner_test843            if getattr(test, "is_hypothesis_test", False):844                raise InvalidArgument(845                    (846                        "You have applied @given to the test %s more than once, which "847                        "wraps the test several times and is extremely slow. A "848                        "similar effect can be gained by combining the arguments "849                        "of the two calls to given. 
For example, instead of "850                        "@given(booleans()) @given(integers()), you could write "851                        "@given(booleans(), integers())"852                    )853                    % (test.__name__,)854                )855            settings = wrapped_test._hypothesis_internal_use_settings856            random = get_random_for_wrapped_test(test, wrapped_test)857            processed_args = process_arguments_to_given(858                wrapped_test, arguments, kwargs, given_kwargs, argspec859            )860            arguments, kwargs, test_runner, search_strategy = processed_args861            runner = getattr(search_strategy, "runner", None)862            if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):863                msg = (864                    "You have applied @given to the method %s, which is "865                    "used by the unittest runner but is not itself a test."866                    "  This is not useful in any way." % test.__name__867                )868                fail_health_check(settings, msg, HealthCheck.not_a_test_method)869            if bad_django_TestCase(runner):  # pragma: no cover870                # Covered by the Django tests, but not the pytest coverage task871                raise InvalidArgument(872                    "You have applied @given to a method on %s, but this "873                    "class does not inherit from the supported versions in "874                    "`hypothesis.extra.django`.  Use the Hypothesis variants "875                    "to ensure that each example is run in a separate "876                    "database transaction." 
% qualname(type(runner))877                )878            state = StateForActualGivenExecution(879                test_runner, search_strategy, test, settings, random, wrapped_test880            )881            reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure882            # If there was a @reproduce_failure decorator, use it to reproduce883            # the error (or complain that we couldn't). Either way, this will884            # always raise some kind of error.885            if reproduce_failure is not None:886                expected_version, failure = reproduce_failure887                if expected_version != __version__:888                    raise InvalidArgument(889                        (890                            "Attempting to reproduce a failure from a different "891                            "version of Hypothesis. This failure is from %s, but "892                            "you are currently running %r. Please change your "893                            "Hypothesis version to a matching one."894                        )895                        % (expected_version, __version__)896                    )897                try:898                    state.execute_once(899                        ConjectureData.for_buffer(decode_failure(failure)),900                        print_example=True,901                        is_final=True,902                    )903                    raise DidNotReproduce(904                        "Expected the test to raise an error, but it "905                        "completed successfully."906                    )907                except StopTest:908                    raise DidNotReproduce(909                        "The shape of the test data has changed in some way "910                        "from where this blob was defined. 
Are you sure "911                        "you're running the same test?"912                    )913                except UnsatisfiedAssumption:914                    raise DidNotReproduce(915                        "The test data failed to satisfy an assumption in the "916                        "test. Have you added it since this blob was "917                        "generated?"918                    )919            # There was no @reproduce_failure, so start by running any explicit920            # examples from @example decorators.921            errors = list(922                execute_explicit_examples(state, wrapped_test, arguments, kwargs)923            )924            with local_settings(state.settings):925                if len(errors) > 1:926                    # If we're not going to report multiple bugs, we would have927                    # stopped running explicit examples at the first failure.928                    assert state.settings.report_multiple_bugs929                    for fragments, err in errors:930                        for f in fragments:931                            report(f)932                        tb_lines = traceback.format_exception(933                            type(err), err, err.__traceback__934                        )935                        report("".join(tb_lines))936                    msg = "Hypothesis found %d failures in explicit examples."937                    raise MultipleFailures(msg % (len(errors)))938                elif errors:939                    fragments, the_error_hypothesis_found = errors[0]940                    for f in fragments:941                        report(f)942                    raise the_error_hypothesis_found943            # If there were any explicit examples, they all ran successfully.944            # The next step is to use the Conjecture engine to run the test on945            # many different inputs.946            if not (947                Phase.reuse in settings.phases or 
Phase.generate in settings.phases948            ):949                return950            try:951                if isinstance(runner, TestCase) and hasattr(runner, "subTest"):952                    subTest = runner.subTest953                    try:954                        runner.subTest = types.MethodType(fake_subTest, runner)955                        state.run_engine()956                    finally:957                        runner.subTest = subTest958                else:959                    state.run_engine()960            except BaseException as e:961                # The exception caught here should either be an actual test962                # failure (or MultipleFailures), or some kind of fatal error963                # that caused the engine to stop.964                generated_seed = wrapped_test._hypothesis_internal_use_generated_seed965                with local_settings(settings):966                    if not (state.failed_normally or generated_seed is None):967                        if running_under_pytest:968                            report(969                                "You can add @seed(%(seed)d) to this test or "970                                "run pytest with --hypothesis-seed=%(seed)d "971                                "to reproduce this failure." % {"seed": generated_seed}972                            )973                        else:974                            report(975                                "You can add @seed(%d) to this test to "976                                "reproduce this failure." 
% (generated_seed,)977                            )978                    # The dance here is to avoid showing users long tracebacks979                    # full of Hypothesis internals they don't care about.980                    # We have to do this inline, to avoid adding another981                    # internal stack frame just when we've removed the rest.982                    #983                    # Using a variable for our trimmed error ensures that the line984                    # which will actually appear in tracebacks is as clear as985                    # possible - "raise the_error_hypothesis_found".986                    the_error_hypothesis_found = e.with_traceback(987                        get_trimmed_traceback()988                    )989                    raise the_error_hypothesis_found990        def _get_fuzz_target() -> Callable[991            [Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]992        ]:993            # Because fuzzing interfaces are very performance-sensitive, we use a994            # somewhat more complicated structure here.  `_get_fuzz_target()` is995            # called by the `HypothesisHandle.fuzz_one_input` property, allowing996            # us to defer our collection of the settings, random instance, and997            # reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.998            #999            # We then share the performance cost of setting up `state` between1000            # many invocations of the target.  
We explicitly force `deadline=None`1001            # for performance reasons, saving ~40% the runtime of an empty test.1002            test = wrapped_test.hypothesis.inner_test1003            settings = Settings(1004                parent=wrapped_test._hypothesis_internal_use_settings, deadline=None1005            )1006            random = get_random_for_wrapped_test(test, wrapped_test)1007            _args, _kwargs, test_runner, search_strategy = process_arguments_to_given(1008                wrapped_test, (), {}, given_kwargs, argspec...Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You could also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!
