How to use the pytest_collectreport hook in pytest

Best Python code snippet using pytest

test_reports.py

Source:test_reports.py Github

copy

Full Screen

# Reconstructed from a line-number-mangled web extraction of pytest's own
# test_reports.py; formatting restored, code tokens unchanged.
import pytest
from _pytest.pathlib import Path
from _pytest.reports import CollectReport
from _pytest.reports import TestReport


class TestReportSerialization(object):
    # Round-trip tests for TestReport/CollectReport _to_json/_from_json,
    # originally ported from pytest-xdist's test_remote.py.

    def test_xdist_longrepr_to_str_issue_241(self, testdir):
        """
        Regarding issue pytest-xdist#241

        This test came originally from test_remote.py in xdist (ca03269).
        """
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        # 2 tests * 3 phases (setup/call/teardown) = 6 reports
        assert len(reports) == 6
        test_a_call = reports[1]
        assert test_a_call.when == "call"
        assert test_a_call.outcome == "failed"
        assert test_a_call._to_json()["longrepr"]["reprtraceback"]["style"] == "long"
        test_b_call = reports[4]
        assert test_b_call.when == "call"
        assert test_b_call.outcome == "passed"
        # passed reports carry no longrepr
        assert test_b_call._to_json()["longrepr"] is None

    def test_xdist_report_longrepr_reprcrash_130(self, testdir):
        """Regarding issue pytest-xdist#130

        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            def test_fail():
                assert False, 'Expected Message'
        """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        added_section = ("Failure Metadata", str("metadata metadata"), "*")
        rep.longrepr.sections.append(added_section)
        d = rep._to_json()
        a = TestReport._from_json(d)
        # Check assembled == rep
        assert a.__dict__.keys() == rep.__dict__.keys()
        for key in rep.__dict__.keys():
            if key != "longrepr":
                assert getattr(a, key) == getattr(rep, key)
        assert rep.longrepr.reprcrash.lineno == a.longrepr.reprcrash.lineno
        assert rep.longrepr.reprcrash.message == a.longrepr.reprcrash.message
        assert rep.longrepr.reprcrash.path == a.longrepr.reprcrash.path
        assert rep.longrepr.reprtraceback.entrysep == a.longrepr.reprtraceback.entrysep
        assert (
            rep.longrepr.reprtraceback.extraline == a.longrepr.reprtraceback.extraline
        )
        assert rep.longrepr.reprtraceback.style == a.longrepr.reprtraceback.style
        assert rep.longrepr.sections == a.longrepr.sections
        # Missing section attribute PR171
        assert added_section in a.longrepr.sections

    def test_reprentries_serialization_170(self, testdir):
        """Regarding issue pytest-xdist#170

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntry

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry():
                x = 0
                assert x
        """,
            "--showlocals",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)
        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntry)
            assert rep_entries[i].lines == a_entries[i].lines
            assert rep_entries[i].reprfileloc.lineno == a_entries[i].reprfileloc.lineno
            assert (
                rep_entries[i].reprfileloc.message == a_entries[i].reprfileloc.message
            )
            assert rep_entries[i].reprfileloc.path == a_entries[i].reprfileloc.path
            assert rep_entries[i].reprfuncargs.args == a_entries[i].reprfuncargs.args
            assert rep_entries[i].reprlocals.lines == a_entries[i].reprlocals.lines
            assert rep_entries[i].style == a_entries[i].style

    def test_reprentries_serialization_196(self, testdir):
        """Regarding issue pytest-xdist#196

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntryNative

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry_native():
                x = 0
                assert x
        """,
            "--tb=native",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)
        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntryNative)
            assert rep_entries[i].lines == a_entries[i].lines

    def test_itemreport_outcomes(self, testdir):
        """
        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            import py
            def test_pass(): pass
            def test_fail(): 0/0
            @py.test.mark.skipif("True")
            def test_skip(): pass
            def test_skip_imperative():
                py.test.skip("hello")
            @py.test.mark.xfail("True")
            def test_xfail(): 0/0
            def test_xfail_imperative():
                py.test.xfail("hello")
        """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 17  # with setup/teardown "passed" reports
        for rep in reports:
            d = rep._to_json()
            newrep = TestReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if newrep.skipped and not hasattr(newrep, "wasxfail"):
                assert len(newrep.longrepr) == 3
            assert newrep.outcome == rep.outcome
            assert newrep.when == rep.when
            assert newrep.keywords == rep.keywords
            if rep.failed:
                assert newrep.longreprtext == rep.longreprtext

    def test_collectreport_passed(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("def test_func(): pass")
        reports = reprec.getreports("pytest_collectreport")
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped

    def test_collectreport_fail(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        # "qwe abc" is deliberately invalid Python so collection fails
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_extended_report_deserialization(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            # extra, non-standard attributes must survive the round trip
            rep.extra = True
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.extra
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_paths_support(self, testdir):
        """Report attributes which are py.path or pathlib objects should become strings."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        test_a_call.path1 = testdir.tmpdir
        test_a_call.path2 = Path(testdir.tmpdir)
        data = test_a_call._to_json()
        assert data["path1"] == str(testdir.tmpdir)
        assert data["path2"] == str(testdir.tmpdir)

    def test_unserialization_failure(self, testdir):
        """Check handling of failure during unserialization of report types."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        data = test_a_call._to_json()
        entry = data["longrepr"]["reprtraceback"]["reprentries"][0]
        assert entry["type"] == "ReprEntry"
        entry["type"] = "Unknown"
        with pytest.raises(
            RuntimeError, match="INTERNALERROR: Unknown entry type returned: Unknown"
        ):
            TestReport._from_json(data)


class TestHooks:
    """Test that the hooks are working correctly for plugins"""

    def test_test_report(self, testdir, pytestconfig):
        """Round-trip a TestReport through the (de)serialization hooks."""
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "TestReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == rep.when
            assert new_rep.outcome == rep.outcome

    def test_collect_report(self, testdir, pytestconfig):
        """Round-trip a CollectReport through the (de)serialization hooks."""
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 2
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "CollectReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == "collect"
            assert new_rep.outcome == rep.outcome

    @pytest.mark.parametrize(
        "hook_name", ["pytest_runtest_logreport", "pytest_collectreport"]
    )
    def test_invalid_report_types(self, testdir, pytestconfig, hook_name):
        """An unknown _report_type must make deserialization fail loudly."""
        testdir.makepyfile(
            """
            def test_a(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports(hook_name)
        assert reports
        rep = reports[0]
        data = pytestconfig.hook.pytest_report_to_serializable(
            config=pytestconfig, report=rep
        )
        data["_report_type"] = "Unknown"
        with pytest.raises(AssertionError):
            _ = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )  # NOTE(review): source snippet truncated here; closing paren restored

Full Screen

Full Screen

test_remote.py

Source:test_remote.py Github

copy

Full Screen

# Reconstructed from a line-number-mangled web extraction of pytest-xdist's
# test_remote.py; formatting restored, code tokens unchanged.
import py
from xdist.slavemanage import SlaveController, unserialize_report
from xdist.remote import serialize_report
import execnet

# py2/py3 compatible queue import
queue = py.builtin._tryimport("queue", "Queue")
import marshal

WAIT_TIMEOUT = 10.0


def check_marshallable(d):
    """Raise ValueError (after pretty-printing d) if d cannot be marshalled."""
    try:
        marshal.dumps(d)
    except ValueError:
        py.std.pprint.pprint(d)
        raise ValueError("not marshallable")


class EventCall:
    # Thin wrapper giving a (name, kwargs) event tuple attribute access.
    def __init__(self, eventcall):
        self.name, self.kwargs = eventcall

    def __str__(self):
        return "<EventCall %s(**%s)>" % (self.name, self.kwargs)


class SlaveSetup:
    """Drives a SlaveController over an execnet gateway for the tests below."""

    use_callback = False

    def __init__(self, request):
        self.testdir = request.getfuncargvalue("testdir")
        self.request = request
        self.events = queue.Queue()

    def setup(self, ):
        self.testdir.chdir()
        # import os ; os.environ['EXECNET_DEBUG'] = "2"
        self.gateway = execnet.makegateway()
        self.config = config = self.testdir.parseconfigure()
        putevent = self.use_callback and self.events.put or None
        self.slp = SlaveController(None, self.gateway, config, putevent)
        self.request.addfinalizer(self.slp.ensure_teardown)
        self.slp.setup()

    def popevent(self, name=None):
        # Pop events until one matching `name` arrives (any event if None).
        while 1:
            if self.use_callback:
                data = self.events.get(timeout=WAIT_TIMEOUT)
            else:
                data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
            ev = EventCall(data)
            if name is None or ev.name == name:
                return ev
            print("skipping %s" % (ev,))

    def sendcommand(self, name, **kwargs):
        self.slp.sendcommand(name, **kwargs)


def pytest_funcarg__slave(request):
    # legacy funcarg-style fixture providing a SlaveSetup
    return SlaveSetup(request)


def test_remoteinitconfig(testdir):
    from xdist.remote import remote_initconfig
    config1 = testdir.parseconfig()
    config2 = remote_initconfig(config1.option.__dict__, config1.args)
    assert config2.option.__dict__ == config1.option.__dict__
    assert config2.pluginmanager.getplugin("terminal") in (-1, None)


class TestReportSerialization:
    # Round-trip tests for xdist's serialize_report/unserialize_report.

    def test_itemreport_outcomes(self, testdir):
        reprec = testdir.inline_runsource("""
            import py
            def test_pass(): pass
            def test_fail(): 0/0
            @py.test.mark.skipif("True")
            def test_skip(): pass
            def test_skip_imperative():
                py.test.skip("hello")
            @py.test.mark.xfail("True")
            def test_xfail(): 0/0
            def test_xfail_imperative():
                py.test.xfail("hello")
        """)
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 17  # with setup/teardown "passed" reports
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("testreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if newrep.skipped and not hasattr(newrep, "wasxfail"):
                assert len(newrep.longrepr) == 3
            assert newrep.outcome == rep.outcome
            assert newrep.when == rep.when
            assert newrep.keywords == rep.keywords
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_collectreport_passed(self, testdir):
        reprec = testdir.inline_runsource("def test_func(): pass")
        reports = reprec.getreports("pytest_collectreport")
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped

    def test_collectreport_fail(self, testdir):
        # "qwe abc" is deliberately invalid Python so collection fails
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_extended_report_deserialization(self, testdir):
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            # extra, non-standard attributes must survive the round trip
            rep.extra = True
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.extra
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)


class TestSlaveInteractor:
    # End-to-end tests driving a slave process via the `slave` funcarg.

    def test_basic_collect_and_runtests(self, slave):
        slave.testdir.makepyfile("""
            def test_func():
                pass
        """)
        slave.setup()
        ev = slave.popevent()
        assert ev.name == "slaveready"
        ev = slave.popevent()
        assert ev.name == "collectionstart"
        assert not ev.kwargs
        ev = slave.popevent("collectionfinish")
        assert ev.kwargs['topdir'] == slave.testdir.tmpdir
        ids = ev.kwargs['ids']
        assert len(ids) == 1
        slave.sendcommand("runtests", indices=list(range(len(ids))))
        slave.sendcommand("shutdown")
        ev = slave.popevent("logstart")
        assert ev.kwargs["nodeid"].endswith("test_func")
        assert len(ev.kwargs["location"]) == 3
        ev = slave.popevent("testreport")  # setup
        ev = slave.popevent("testreport")
        assert ev.name == "testreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.nodeid.endswith("::test_func")
        assert rep.passed
        assert rep.when == "call"
        ev = slave.popevent("slavefinished")
        assert 'slaveoutput' in ev.kwargs

    def test_remote_collect_skip(self, slave):
        slave.testdir.makepyfile("""
            import py
            py.test.skip("hello")
        """)
        slave.setup()
        ev = slave.popevent("collectionstart")
        assert not ev.kwargs
        ev = slave.popevent()
        assert ev.name == "collectreport"
        ev = slave.popevent()
        assert ev.name == "collectreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.skipped
        ev = slave.popevent("collectionfinish")
        assert not ev.kwargs['ids']

    def test_remote_collect_fail(self, slave):
        slave.testdir.makepyfile("""aasd qwe""")
        slave.setup()
        ev = slave.popevent("collectionstart")
        assert not ev.kwargs
        ev = slave.popevent()
        assert ev.name == "collectreport"
        ev = slave.popevent()
        assert ev.name == "collectreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.failed
        ev = slave.popevent("collectionfinish")
        assert not ev.kwargs['ids']

    def test_runtests_all(self, slave):
        slave.testdir.makepyfile("""
            def test_func(): pass
            def test_func2(): pass
        """)
        slave.setup()
        ev = slave.popevent()
        assert ev.name == "slaveready"
        ev = slave.popevent()
        assert ev.name == "collectionstart"
        assert not ev.kwargs
        ev = slave.popevent("collectionfinish")
        ids = ev.kwargs['ids']
        assert len(ids) == 2
        slave.sendcommand("runtests_all", )
        slave.sendcommand("shutdown", )
        for func in "::test_func", "::test_func2":
            for i in range(3):  # setup/call/teardown
                ev = slave.popevent("testreport")
                assert ev.name == "testreport"
                rep = unserialize_report(ev.name, ev.kwargs['data'])
                assert rep.nodeid.endswith(func)
        ev = slave.popevent("slavefinished")
        assert 'slaveoutput' in ev.kwargs

    def test_happy_run_events_converted(self, testdir, slave):
        # xfails immediately; the remainder documents the intended assertions
        py.test.xfail("implement a simple test for event production")
        assert not slave.use_callback
        slave.testdir.makepyfile("""
            def test_func():
                pass
        """)
        slave.setup()
        hookrec = testdir.getreportrecorder(slave.config)
        for data in slave.slp.channel:
            slave.slp.process_from_remote(data)
        slave.slp.process_from_remote(slave.slp.ENDMARK)
        py.std.pprint.pprint(hookrec.hookrecorder.calls)
        hookrec.hookrecorder.contains([
            ("pytest_collectstart", "collector.fspath == aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.collector.fspath == aaa"),
            ("pytest_collectstart", "collector.fspath == bbb"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.collector.fspath == bbb"),
        ])  # NOTE(review): source snippet truncated mid-list; closed minimally

Full Screen

Full Screen

test_session.py

Source:test_session.py Github

copy

Full Screen

# Reconstructed from a line-number-mangled web extraction of pytest's own
# test_session.py; formatting restored, code tokens unchanged.
from __future__ import absolute_import, division, print_function
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED


class SessionTests(object):
    def test_basic_testitem_events(self, testdir):
        tfile = testdir.makepyfile("""
            def test_one():
                pass
            def test_one_one():
                assert 0
            def test_other():
                raise ValueError(23)
            class TestClass(object):
                def test_two(self, someargs):
                    pass
        """)
        reprec = testdir.inline_run(tfile)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(skipped) == 0
        assert len(passed) == 1
        assert len(failed) == 3

        def end(x):
            # last component of the nodeid, i.e. the test function name
            return x.nodeid.split("::")[-1]

        assert end(failed[0]) == "test_one_one"
        assert end(failed[1]) == "test_other"
        itemstarted = reprec.getcalls("pytest_itemcollected")
        assert len(itemstarted) == 4
        # XXX check for failing funcarg setup
        # colreports = reprec.getcalls("pytest_collectreport")
        # assert len(colreports) == 4
        # assert colreports[1].report.failed

    def test_nested_import_error(self, testdir):
        tfile = testdir.makepyfile("""
            import import_fails
            def test_this():
                assert import_fails.a == 1
        """, import_fails="""
            import does_not_work
            a = 1
        """)
        reprec = testdir.inline_run(tfile)
        values = reprec.getfailedcollections()
        assert len(values) == 1
        out = str(values[0].longrepr)
        assert out.find('does_not_work') != -1

    def test_raises_output(self, testdir):
        reprec = testdir.inline_runsource("""
            import pytest
            def test_raises_doesnt():
                pytest.raises(ValueError, int, "3")
        """)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(failed) == 1
        out = failed[0].longrepr.reprcrash.message
        if not out.find("DID NOT RAISE") != -1:
            print(out)
            pytest.fail("incorrect raises() output")

    def test_generator_yields_None(self, testdir):
        reprec = testdir.inline_runsource("""
            def test_1():
                yield None
        """)
        failures = reprec.getfailedcollections()
        out = failures[0].longrepr.reprcrash.message
        i = out.find('TypeError')
        assert i != -1

    def test_syntax_error_module(self, testdir):
        reprec = testdir.inline_runsource("this is really not python")
        values = reprec.getfailedcollections()
        assert len(values) == 1
        out = str(values[0].longrepr)
        assert out.find(str('not python')) != -1

    def test_exit_first_problem(self, testdir):
        reprec = testdir.inline_runsource("""
            def test_one(): assert 0
            def test_two(): assert 0
        """, '--exitfirst')
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 1
        assert passed == skipped == 0

    def test_maxfail(self, testdir):
        reprec = testdir.inline_runsource("""
            def test_one(): assert 0
            def test_two(): assert 0
            def test_three(): assert 0
        """, '--maxfail=2')
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 2
        assert passed == skipped == 0

    def test_broken_repr(self, testdir):
        p = testdir.makepyfile("""
            import pytest
            class BrokenRepr1(object):
                foo=0
                def __repr__(self):
                    raise Exception("Ha Ha fooled you, I'm a broken repr().")
            class TestBrokenClass(object):
                def test_explicit_bad_repr(self):
                    t = BrokenRepr1()
                    pytest.raises(Exception, 'repr(t)')
                def test_implicit_bad_repr1(self):
                    t = BrokenRepr1()
                    assert t.foo == 1
        """)
        reprec = testdir.inline_run(p)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(failed) == 1
        out = failed[0].longrepr.reprcrash.message
        assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1  # '

    def test_skip_file_by_conftest(self, testdir):
        testdir.makepyfile(conftest="""
            import pytest
            def pytest_collect_file():
                pytest.skip("intentional")
        """, test_file="""
            def test_one(): pass
        """)
        try:
            reprec = testdir.inline_run(testdir.tmpdir)
        except pytest.skip.Exception:
            pytest.fail("wrong skipped caught")
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 1
        assert reports[0].skipped


class TestNewSession(SessionTests):
    def test_order_of_execution(self, testdir):
        reprec = testdir.inline_runsource("""
            values = []
            def test_1():
                values.append(1)
            def test_2():
                values.append(2)
            def test_3():
                assert values == [1,2]
            class Testmygroup(object):
                reslist = values
                def test_1(self):
                    self.reslist.append(1)
                def test_2(self):
                    self.reslist.append(2)
                def test_3(self):
                    self.reslist.append(3)
                def test_4(self):
                    assert self.reslist == [1,2,1,2,3]
        """)
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == skipped == 0
        assert passed == 7
        # also test listnames() here ...

    def test_collect_only_with_various_situations(self, testdir):
        p = testdir.makepyfile(
            test_one="""
                def test_one():
                    raise ValueError()
                class TestX(object):
                    def test_method_one(self):
                        pass
                class TestY(TestX):
                    pass
            """,
            test_three="xxxdsadsadsadsa",
            __init__=""
        )
        reprec = testdir.inline_run('--collect-only', p.dirpath())
        itemstarted = reprec.getcalls("pytest_itemcollected")
        assert len(itemstarted) == 3
        # collect-only must not produce runtest reports
        assert not reprec.getreports("pytest_runtest_logreport")
        started = reprec.getcalls("pytest_collectstart")
        finished = reprec.getreports("pytest_collectreport")
        assert len(started) == len(finished)
        assert len(started) == 7  # XXX extra TopCollector
        colfail = [x for x in finished if x.failed]
        assert len(colfail) == 1

    def test_minus_x_import_error(self, testdir):
        testdir.makepyfile(__init__="")
        testdir.makepyfile(test_one="xxxx", test_two="yyyy")
        reprec = testdir.inline_run("-x", testdir.tmpdir)
        finished = reprec.getreports("pytest_collectreport")
        colfail = [x for x in finished if x.failed]
        assert len(colfail) == 1

    def test_minus_x_overridden_by_maxfail(self, testdir):
        testdir.makepyfile(__init__="")
        testdir.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
        reprec = testdir.inline_run("-x", "--maxfail=2", testdir.tmpdir)
        finished = reprec.getreports("pytest_collectreport")
        colfail = [x for x in finished if x.failed]
        assert len(colfail) == 2


def test_plugin_specify(testdir):
    pytest.raises(ImportError, """
        testdir.parseconfig("-p", "nqweotexistent")
    """)
    # pytest.raises(ImportError,
    #    "config.do_configure(config)"
    # )


def test_plugin_already_exists(testdir):
    config = testdir.parseconfig("-p", "terminal")
    assert config.option.plugins == ['terminal']
    config._do_configure()
    config._ensure_unconfigure()


def test_exclude(testdir):
    hellodir = testdir.mkdir("hello")
    hellodir.join("test_hello.py").write("x y syntaxerror")
    hello2dir = testdir.mkdir("hello2")
    hello2dir.join("test_hello2.py").write("x y syntaxerror")
    testdir.makepyfile(test_ok="def test_pass(): pass")
    result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_sessionfinish_with_start(testdir):
    testdir.makeconftest("""
        import os
        values = []
        def pytest_sessionstart():
            values.append(os.getcwd())
            os.chdir("..")
        def pytest_sessionfinish():
            assert values[0] == os.getcwd()
    """)
    res = testdir.runpytest("--collect-only")
    # NOTE(review): source snippet truncated here; the original presumably
    # asserts on res.ret (e.g. EXIT_NOTESTSCOLLECTED, imported above) — confirm

Full Screen

Full Screen

conftest.py

Source:conftest.py Github

copy

Full Screen

1import pytest2def pytest_collectreport(report):3 if report.failed:...

Full Screen

Full Screen

Pytest Tutorial

Looking for an in-depth tutorial around pytest? LambdaTest covers the detailed pytest tutorial that has everything related to the pytest, from setting up the pytest framework to automation testing. Delve deeper into pytest testing by exploring advanced use cases like parallel testing, pytest fixtures, parameterization, executing multiple test cases from a single file, and more.

Chapters

  1. What is pytest
  2. Pytest installation: Want to start pytest from scratch? See how to install and configure pytest for Python automation testing.
  3. Run first test with pytest framework: Follow this step-by-step tutorial to write and run your first pytest script.
  4. Parallel testing with pytest: A hands-on guide to parallel testing with pytest to improve the scalability of your test automation.
  5. Generate pytest reports: Reports make it easier to understand the results of pytest-based test runs. Learn how to generate pytest reports.
  6. Pytest Parameterized tests: Create and run your pytest scripts while avoiding code duplication and increasing test coverage with parameterization.
  7. Pytest Fixtures: Check out how to implement pytest fixtures for your end-to-end testing needs.
  8. Execute Multiple Test Cases: Explore different scenarios for running multiple test cases in pytest from a single file.
  9. Stop Test Suite after N Test Failures: See how to stop your test suite after n test failures in pytest using the @pytest.mark.incremental decorator and maxfail command-line option.

YouTube

Skim the pytest tutorial playlist below to get started with automation testing using the pytest framework.

https://www.youtube.com/playlist?list=PLZMWkkQEwOPlcGgDmHl8KkXKeLF83XlrP

Run Pytest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation testing minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful