Best Python code snippet using pytest-bdd_python
test_reports.py
Source:test_reports.py  
import pytest
from _pytest.pathlib import Path
from _pytest.reports import CollectReport
from _pytest.reports import TestReport


class TestReportSerialization(object):
    def test_xdist_longrepr_to_str_issue_241(self, testdir):
        """
        Regarding issue pytest-xdist#241

        This test came originally from test_remote.py in xdist (ca03269).
        """
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        test_a_call = reports[1]
        assert test_a_call.when == "call"
        assert test_a_call.outcome == "failed"
        assert test_a_call._to_json()["longrepr"]["reprtraceback"]["style"] == "long"
        test_b_call = reports[4]
        assert test_b_call.when == "call"
        assert test_b_call.outcome == "passed"
        assert test_b_call._to_json()["longrepr"] is None

    def test_xdist_report_longrepr_reprcrash_130(self, testdir):
        """Regarding issue pytest-xdist#130

        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            def test_fail():
                assert False, 'Expected Message'
            """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        added_section = ("Failure Metadata", str("metadata metadata"), "*")
        rep.longrepr.sections.append(added_section)
        d = rep._to_json()
        a = TestReport._from_json(d)
        # Check assembled == rep
        assert a.__dict__.keys() == rep.__dict__.keys()
        for key in rep.__dict__.keys():
            if key != "longrepr":
                assert getattr(a, key) == getattr(rep, key)
        assert rep.longrepr.reprcrash.lineno == a.longrepr.reprcrash.lineno
        assert rep.longrepr.reprcrash.message == a.longrepr.reprcrash.message
        assert rep.longrepr.reprcrash.path == a.longrepr.reprcrash.path
        assert rep.longrepr.reprtraceback.entrysep == a.longrepr.reprtraceback.entrysep
        assert (
            rep.longrepr.reprtraceback.extraline == a.longrepr.reprtraceback.extraline
        )
        assert rep.longrepr.reprtraceback.style == a.longrepr.reprtraceback.style
        assert rep.longrepr.sections == a.longrepr.sections
        # Missing section attribute PR171
        assert added_section in a.longrepr.sections

    def test_reprentries_serialization_170(self, testdir):
        """Regarding issue pytest-xdist#170

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntry

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry():
                x = 0
                assert x
            """,
            "--showlocals",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)
        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntry)
            assert rep_entries[i].lines == a_entries[i].lines
            assert rep_entries[i].reprfileloc.lineno == a_entries[i].reprfileloc.lineno
            assert (
                rep_entries[i].reprfileloc.message == a_entries[i].reprfileloc.message
            )
            assert rep_entries[i].reprfileloc.path == a_entries[i].reprfileloc.path
            assert rep_entries[i].reprfuncargs.args == a_entries[i].reprfuncargs.args
            assert rep_entries[i].reprlocals.lines == a_entries[i].reprlocals.lines
            assert rep_entries[i].style == a_entries[i].style

    def test_reprentries_serialization_196(self, testdir):
        """Regarding issue pytest-xdist#196

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntryNative

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry_native():
                x = 0
                assert x
            """,
            "--tb=native",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)
        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntryNative)
            assert rep_entries[i].lines == a_entries[i].lines

    def test_itemreport_outcomes(self, testdir):
        """
        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            import py
            def test_pass(): pass
            def test_fail(): 0/0
            @py.test.mark.skipif("True")
            def test_skip(): pass
            def test_skip_imperative():
                py.test.skip("hello")
            @py.test.mark.xfail("True")
            def test_xfail(): 0/0
            def test_xfail_imperative():
                py.test.xfail("hello")
            """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 17  # with setup/teardown "passed" reports
        for rep in reports:
            d = rep._to_json()
            newrep = TestReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if newrep.skipped and not hasattr(newrep, "wasxfail"):
                assert len(newrep.longrepr) == 3
            assert newrep.outcome == rep.outcome
            assert newrep.when == rep.when
            assert newrep.keywords == rep.keywords
            if rep.failed:
                assert newrep.longreprtext == rep.longreprtext

    def test_collectreport_passed(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("def test_func(): pass")
        reports = reprec.getreports("pytest_collectreport")
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped

    def test_collectreport_fail(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_extended_report_deserialization(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            rep.extra = True
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.extra
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_paths_support(self, testdir):
        """Report attributes which are py.path or pathlib objects should become strings."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        test_a_call.path1 = testdir.tmpdir
        test_a_call.path2 = Path(testdir.tmpdir)
        data = test_a_call._to_json()
        assert data["path1"] == str(testdir.tmpdir)
        assert data["path2"] == str(testdir.tmpdir)

    def test_unserialization_failure(self, testdir):
        """Check handling of failure during unserialization of report types."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        data = test_a_call._to_json()
        entry = data["longrepr"]["reprtraceback"]["reprentries"][0]
        assert entry["type"] == "ReprEntry"
        entry["type"] = "Unknown"
        with pytest.raises(
            RuntimeError, match="INTERNALERROR: Unknown entry type returned: Unknown"
        ):
            TestReport._from_json(data)


class TestHooks:
    """Test that the hooks are working correctly for plugins"""

    def test_test_report(self, testdir, pytestconfig):
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "TestReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == rep.when
            assert new_rep.outcome == rep.outcome

    def test_collect_report(self, testdir, pytestconfig):
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 2
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "CollectReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == "collect"
            assert new_rep.outcome == rep.outcome

    @pytest.mark.parametrize(
        "hook_name", ["pytest_runtest_logreport", "pytest_collectreport"]
    )
    def test_invalid_report_types(self, testdir, pytestconfig, hook_name):
        testdir.makepyfile(
            """
            def test_a(): pass
            """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports(hook_name)
        assert reports
        rep = reports[0]
        data = pytestconfig.hook.pytest_report_to_serializable(
            config=pytestconfig, report=rep
        )
        data["_report_type"] = "Unknown"
        with pytest.raises(AssertionError):
            _ = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
...
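The tests above drive pytest's (experimental) report-serialization machinery end to end: _to_json/_from_json on the report classes, plus the paired pytest_report_to_serializable/pytest_report_from_serializable hooks that plugins such as pytest-xdist use to ship reports between processes. As a rough illustration of how the same hooks can be used outside the test suite, here is a minimal conftest.py sketch that appends every runtest report to a JSON-lines file; the ReportRecorder class and the reports.jsonl path are our own inventions, not part of pytest, and the sketch assumes a pytest version that ships these hooks (>= 5.1).

# conftest.py -- a minimal sketch, not pytest's own code
import json


class ReportRecorder:
    """Hypothetical helper: append each runtest report to a JSON-lines file."""

    def __init__(self, config, path="reports.jsonl"):
        self.config = config
        self.path = path

    def pytest_runtest_logreport(self, report):
        # The same hook call the TestHooks tests above assert against.
        data = self.config.hook.pytest_report_to_serializable(
            config=self.config, report=report
        )
        with open(self.path, "a") as fp:
            fp.write(json.dumps(data) + "\n")


def pytest_configure(config):
    config.pluginmanager.register(ReportRecorder(config))

A consumer process could read each line back with json.loads and rebuild the report via pytest_report_from_serializable, which is exactly the round trip test_test_report verifies.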
pytest_failed_to_verify.py
Source:pytest_failed_to_verify.py
...
    setup_state.stack = list()


def _clear_cache(parallel, report, item):
    if not parallel or works_with_current_xdist():
        # will rerun test, log intermediate result
        item.ihook.pytest_runtest_logreport(report=report)
    # clean the item's cached results from any level of setups
    _remove_cached_results_from_failed_fixtures(item)
    _remove_failed_setup_state_from_session(item)


def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """
    rerun_setup = get_rerun_setup_count(item)
    if rerun_setup is None:
        # global setting is not specified, no setup reruns
        return
    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)
    parallel = hasattr(item.config, 'slaveinput')
    item.execution_count = 0
    need_to_run = True
    while need_to_run:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)
        for report in reports:  # 3 reports: setup, call, teardown
            report.failed_to_verify = False
            if report.when == 'setup':
                report.rerun = item.execution_count - 1
                xfail = hasattr(report, 'wasxfail')
                if item.execution_count > rerun_setup and _failed(report):
                    # last run and failure detected on setup
                    report.failed_to_verify = True
                    item.ihook.pytest_runtest_logreport(report=report)
                elif item.execution_count > rerun_setup and _passed(report) or report.skipped and not xfail:
                    # last run and no failure detected, log normally
                    item.ihook.pytest_runtest_logreport(report=report)
                elif item.execution_count > rerun_setup and xfail and not report.passed:
                    # last run and setup failed on xfail (remove any xfail
                    # traces, otherwise pytest exits with code 0)
                    report.outcome = 'failed'
                    report.failed_to_verify = True
                    del report.wasxfail
                    item.ihook.pytest_runtest_logreport(report=report)
                elif item.execution_count > rerun_setup:
                    item.ihook.pytest_runtest_logreport(report=report)
                elif report.passed:
                    item.ihook.pytest_runtest_logreport(report=report)
                else:
                    report.outcome = 'setup rerun'
                    _clear_cache(parallel, report, item)
                    break  # trigger rerun
            else:
                item.ihook.pytest_runtest_logreport(report=report)
        else:
            need_to_run = False
        item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    return True


def _passed(report):
    return report.passed and not report.failed and not report.skipped


def _failed(report):
    return not report.passed and report.failed and not report.skipped


def pytest_report_teststatus(report):
    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
    """
    if report.outcome == 'setup rerun':
        return 'setup rerun', 'SR', ('SETUP RERUN',
                                     {'yellow': True})
    if report.failed_to_verify:
        return 'failed to verify', 'F2V', ('FAILED TO VERIFY',
                                           {'red': True})


def pytest_terminal_summary(terminalreporter):
    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
    """
    tr = terminalreporter
    if not tr.reportchars:
        return
    failed_to_verify = tr.stats.get("failed to verify")
    lines = []
    if failed_to_verify:
        for rep in failed_to_verify:
            pos = rep.nodeid
            lines.append("FAILED TO VERIFY %s" % (pos,))
            lines.append(rep.longreprtext)
    if lines:
        tr._tw.sep("=", "setup rerun test summary info")
        for line in lines:
            tr._tw.line(line)


class RerunResultLog(ResultLog):
    def __init__(self, config, logfile):
        ResultLog.__init__(self, config, logfile)

    def pytest_runtest_logreport(self, report):
        """
        Adds support for rerun report fix for issue:
        https://github.com/pytest-dev/pytest-rerunfailures/issues/28
        """
        if report.when != "call" and report.passed:
            return
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]
        if code == 'x':
            longrepr = str(report.longrepr)
        elif code == 'X':
            longrepr = ''
        elif report.passed:
            longrepr = ""
...
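Note that the rerun logic above keys entirely off the setup phase: only a report with when == 'setup' can be marked 'setup rerun' or 'failed to verify', while call-phase failures are logged normally. A minimal sketch of a test file that would exercise the rerun path, assuming the plugin is installed and its setup-rerun count option is enabled (the fixture below is our own example, not part of the plugin):

import pytest

ATTEMPTS_BEFORE_SUCCESS = 2  # pretend the resource comes up on the third try
_failures = {"left": ATTEMPTS_BEFORE_SUCCESS}


@pytest.fixture
def flaky_resource():
    # Raising here fails the *setup* phase, which is what
    # pytest_runtest_protocol above inspects and retries.
    if _failures["left"] > 0:
        _failures["left"] -= 1
        raise RuntimeError("resource not ready yet")
    return "ready"


def test_uses_flaky_resource(flaky_resource):
    assert flaky_resource == "ready"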
testmonitorplugin_test.py
Source:testmonitorplugin_test.py
...
    assert not plugin.is_websocket_connected()
    mock_event = Mock()
    plugin.send_event(mock_event)
    assert not plugin.is_websocket_connected()


def test_pytest_runtest_logreport(plugin, report, monkeypatch):
    start_time = time.time()
    monkeypatch.setattr(plugin, 'send_event', Mock())
    monkeypatch.setattr(sut.time,
                        'time',
                        Mock(return_value=start_time + 5))
    monkeypatch.setattr(plugin, '_test_cases', {report.nodeid: start_time})
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 2
    arg = plugin.send_event.call_args_list[0][0][0]
    assert type(arg) == msg.SessionStartedEvent
    event = plugin.send_event.call_args_list[1][0][0]
    assert type(event) == msg.TestCaseFinishedEvent
    assert event['duration'] == 5000
    assert len(plugin.reports) == 1
    assert plugin.reports[0] == report


def test_pytest_runtest_logreport_starts_session_only_once(
        report,
        plugin,
        monkeypatch):
    monkeypatch.setattr(plugin, 'send_event', Mock())
    monkeypatch.setattr(plugin,
                        '_test_cases',
                        {report.nodeid: time.time()})
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 2


def test_pytest_runtest_logreport_store_tc_start_time(plugin,
                                                      monkeypatch,
                                                      report):
    start_time = time.time()
    monkeypatch.setattr(plugin, 'send_event', Mock())
    monkeypatch.setattr(sut.time,
                        'time',
                        Mock(return_value=start_time))
    report.when = 'setup'
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 0
    assert len(plugin.reports) == 0
    assert report.nodeid in plugin._test_cases
    assert plugin._test_cases[report.nodeid] == start_time


def test_pytest_runtest_logreport_ignore_tc_not_found(plugin,
                                                      monkeypatch,
                                                      report):
    monkeypatch.setattr(plugin, 'send_event', Mock())
    monkeypatch.setattr(plugin, '_test_cases', {})
    report.when = 'call'
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 0
    assert len(plugin.reports) == 0
    assert report.nodeid not in plugin._test_cases


def test_pytest_runtest_logreport_no_detail(plugin, report, monkeypatch):
    monkeypatch.setattr(plugin, 'send_event', Mock())
    report.nodeid = report.fs_path
    report.keywords = {}
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 0
    assert len(plugin.reports) == 0


def test_pytest_runtest_logreport_pep8(plugin, report, monkeypatch):
    monkeypatch.setattr(plugin, 'send_event', Mock())
    report.nodeid = report.fs_path
    report.keywords = {'pep8': 1}
    report.when = 'setup'
    report.outcome = 'skipped'
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 2
    assert len(plugin.reports) == 1


@pytest.mark.parametrize('phase', ['setup', 'teardown'])
def test_pytest_runtest_logreport_ignore_phase(
        phase, plugin, report, monkeypatch):
    monkeypatch.setattr(plugin, 'send_event', Mock())
    report.when = phase
    plugin.pytest_runtest_logreport(report)
    assert len(plugin.send_event.call_args_list) == 0
    assert len(plugin.reports) == 0


def test_empty_single_pass(tmpdir, plugin, monkeypatch):
    test_proj_path = str(tmpdir) + '/singlepass'
    shutil.copytree(TESTDATA_DIR + '/testproj/singlepass',
                    test_proj_path)
    # TODO
    # Deletion of __pycache__ is necessary, or py.test will fail with the
    # following error message:
    #
    # import file mismatch:
    # imported module 'simple_test' has this __file__ attribute:
    # /home/bb/devel/python/purkinje/testdata/
    # testproj/singlepass/simple_test.py
...
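Read together, these tests pin down the plugin's logreport contract: a 'setup' report records the test's start time, a 'call' report for a known nodeid emits a session-started event (once) plus a test-case-finished event whose duration is in milliseconds, and anything else is ignored. A simplified sketch of a handler that would satisfy the core assertions; names and event shapes are ours, and the real plugin sends typed events over a websocket and handles extra cases such as pep8 reports:

import time


class MonitorSketch:
    """Hypothetical reconstruction of the behaviour under test."""

    def __init__(self):
        self._test_cases = {}    # nodeid -> start time recorded at setup
        self.reports = []
        self._session_started = False

    def send_event(self, event):
        pass  # the real plugin pushes events over a websocket

    def pytest_runtest_logreport(self, report):
        if report.when == 'setup':
            self._test_cases[report.nodeid] = time.time()
            return
        if report.when != 'call' or report.nodeid not in self._test_cases:
            return  # teardown reports and unknown test cases are ignored
        if not self._session_started:
            self.send_event({'type': 'session_started'})
            self._session_started = True
        started = self._test_cases[report.nodeid]
        duration_ms = (time.time() - started) * 1000  # 5 s -> 5000, as asserted
        self.send_event({'type': 'test_case_finished', 'duration': duration_ms})
        self.reports.append(report)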
test_patch.py
Source:test_patch.py
...
        pytest_spec.patch.docstring_summaries = dict()

    def test__pytest_runtest_logstart__returns_none(self):
        self.assertEqual(pytest_runtest_logstart('self', 'nodeid', 'location'), None)

    def test__pytest_runtest_logreport__returns_none_when_letter_is_missing(self):
        result = pytest_runtest_logreport(FakeSelf(letter=''), FakeReport('Test::Second::Test_example_demo'))
        self.assertIsNone(result)

    def test__pytest_runtest_logreport__returns_none_when_word_is_missing(self):
        result = pytest_runtest_logreport(FakeSelf(word=''), FakeReport('Test::Second::Test_example_demo'))
        self.assertIsNone(result)

    def test__pytest_runtest_logreport__returns_none_when_nodeid_is_wrong_formatted(self):
        result = pytest_runtest_logreport(FakeSelf(), FakeReport(''))
        self.assertIsNone(result)

    def test__pytest_runtest_logreport__prints_class_name_before_first_test_result(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::Test_example_demo'))
        fake_self._tw.write.assert_has_calls([call('Second:')])

    def test__pytest_runtest_logreport__prints_test_name_and_passed_status(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_demo'))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ✓ Example demo', green=True)
        ])

    def test__pytest_runtest_logreport__prints_test_name_and_failed_status(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_demo', passed=False, failed=True))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ✗ Example demo', red=True)
        ])

    def test__pytest_runtest_logreport__prints_test_name_and_skipped_status(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_demo', passed=False, skipped=True))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ? Example demo', yellow=True)
        ])

    def test__pytest_runtest_logreport__skips_empty_line_for_first_test(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_demo'))
        with self.assertRaises(AssertionError):
            fake_self._tw.write.assert_has_calls([call.line(), call.line()])

    def test__pytest_runtest_logreport__marks_method_marked_by_double_underscores(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test__example__demo'))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ✓ Example demo', green=True)
        ])

    def test__pytest_runtest_logreport__prints_test_name_and_handle_only_single_marker(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test__example'))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ✓ Example', green=True)
        ])

    def test__pytest_runtest_logreport__honors_capitalization_of_words_in_test_name(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_Demo_CamelCase'))
        fake_self._tw.write.assert_has_calls([
            call('Second:'),
            call('  ✓ Example Demo CamelCase', green=True)
        ])

    def test__pytest_runtest_longreport__uses_docstring_summary(self):
        fake_self = FakeSelf()
        fake_self.config.mapping['spec_test_format'] = "{result} {docstring_summary}"
        pytest_runtest_logreport(fake_self, FakeReport('Test::Second::test_example_Demo_CamelCase'))
        fake_self._tw.write.assert_has_calls([
            call('  ✓ Test documentation', green=True)
        ])

    def test__pytest_runtest_longreport__uses_test_name_as_docstring_summary_if_missing(self):
        fake_self = FakeSelf()
        fake_self.config.mapping['spec_test_format'] = "{result} {docstring_summary}"
        fake_report = FakeReport('Test::Second::test_example_Demo_CamelCase')
        fake_report.docstring_summary = None
        pytest_runtest_logreport(fake_self, fake_report)
        fake_self._tw.write.assert_has_calls([
            call('  ✓ Example Demo CamelCase', green=True)
        ])

    def test__pytest_runtest_logreport__ignores_nodeid_which_matches_ignore_string(self):
        fake_self = FakeSelf()
        pytest_runtest_logreport(fake_self, FakeReport('Test::FLAKE8'))
        assert not fake_self._tw.write.mock_calls

    def test__pytest_runtest_logreport__ignores_nodeid_if_multiple_string_ignore_are_provided(self):
        fake_self = FakeSelf()
        fake_self.config.mapping['spec_ignore'] = "FLAKE8,Something"
        pytest_runtest_logreport(fake_self, FakeReport('Something'))
        assert not fake_self._tw.write.called


if __name__ == '__main__':
...
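The FakeSelf and FakeReport doubles these tests depend on are not shown in the snippet. A plausible sketch, inferred from the assertions above; the real helpers live in the project's own test utilities and may differ in detail:

from unittest.mock import Mock


class FakeReport:
    """Minimal report double: a nodeid plus outcome flags."""

    def __init__(self, nodeid, passed=True, failed=False, skipped=False):
        self.nodeid = nodeid      # e.g. 'Test::Second::test_example_demo'
        self.passed = passed
        self.failed = failed
        self.skipped = skipped
        self.docstring_summary = 'Test documentation'


class FakeSelf:
    """Stand-in for pytest's TerminalReporter as seen by the patched method."""

    def __init__(self, letter='.', word='PASSED'):
        self._tw = Mock()         # terminal writer; its .write calls are asserted
        self.config = Mock()
        self.config.mapping = {'spec_ignore': 'FLAKE8'}
        self.config.getini = self.config.mapping.get
        self.letter = letter      # outcome letter/word normally derived from
        self.word = word          # pytest_report_teststatus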
