How to use test_id method in Slash

Best Python code snippet using slash

test_update.py

Source: test_update.py (GitHub)

copy

Full Screen

"""Tests for wptrunner's expectation-metadata update logic.

Each test builds an in-memory test manifest plus one or more structured
(mozlog) run logs, feeds them through ``metadata.update_results``, and
asserts on the resulting expectation manifests.
"""
import json
import os
import sys
from io import BytesIO

import mock
import pytest

from .. import metadata, manifestupdate
from ..update.update import WPTUpdate          # noqa: F401 (unused in this excerpt)
from ..update.base import StepRunner, Step     # noqa: F401 (unused in this excerpt)
from mozlog import structuredlog, handlers, formatters

here = os.path.dirname(__file__)
# Make the top-level "manifest" package importable from the checkout root.
sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))
from manifest import manifest, item as manifest_item, utils


def rel_path_to_test_url(rel_path):
    """Convert an OS-relative file path to a URL path (forward slashes)."""
    assert not os.path.isabs(rel_path)
    return rel_path.replace(os.sep, "/")


def SourceFileWithTest(path, hash, cls, *args):
    """Return a mock SourceFile exposing a single manifest item of type *cls*."""
    path_parts = tuple(path.split("/"))
    path = utils.to_os_path(path)
    s = mock.Mock(rel_path=path, rel_path_parts=path_parts, hash=hash)
    test = cls("/foobar", path, "/", rel_path_to_test_url(path), *args)
    s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
    return s


# Map manifest item-type names to their constructor classes.
item_classes = {"testharness": manifest_item.TestharnessTest,
                "reftest": manifest_item.RefTest,
                "manual": manifest_item.ManualTest,
                "wdspec": manifest_item.WebDriverSpecTest,
                "conformancechecker": manifest_item.ConformanceCheckerTest,
                "visual": manifest_item.VisualTest,
                "support": manifest_item.SupportFile}

# Baseline run_info; individual tests override selected properties.
default_run_info = {"debug": False, "os": "linux", "version": "18.04",
                    "processor": "x86_64", "bits": 64}

test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"


def reset_globals():
    """Clear metadata's intern caches so state cannot leak between tests."""
    metadata.prop_intern.clear()
    metadata.run_info_intern.clear()
    metadata.status_intern.clear()


def get_run_info(overrides):
    """Return a copy of default_run_info updated with *overrides*."""
    run_info = default_run_info.copy()
    run_info.update(overrides)
    return run_info


def update(tests, *logs, **kwargs):
    """Run the metadata update over *tests* with the given run *logs*.

    ``tests`` is a list of (test_path, test_ids, test_type, manifest_bytes)
    tuples; keyword-only flags mirror the updater's options.  Returns the
    list produced by ``metadata.update_results``.
    """
    full_update = kwargs.pop("full_update", False)
    disable_intermittent = kwargs.pop("disable_intermittent", False)
    update_intermittent = kwargs.pop("update_intermittent", False)
    remove_intermittent = kwargs.pop("remove_intermittent", False)
    assert not kwargs  # catch typo'd keyword arguments

    id_test_map, updater = create_updater(tests)
    for log in logs:
        log = create_log(log)
        updater.update_from_log(log)

    update_properties = (["debug", "os", "version", "processor"],
                         {"os": ["version"], "processor": ["bits"]})

    # Patch load_expected so the updater reads the supplied manifest bytes
    # instead of touching the filesystem.
    expected_data = {}
    metadata.load_expected = lambda _, __, test_path, *args: expected_data.get(test_path)
    for test_path, test_ids, test_type, manifest_str in tests:
        test_path = utils.to_os_path(test_path)
        expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
                                                          test_path,
                                                          "/",
                                                          update_properties,
                                                          update_intermittent,
                                                          remove_intermittent)

    return list(metadata.update_results(id_test_map,
                                        update_properties,
                                        full_update,
                                        disable_intermittent,
                                        update_intermittent,
                                        remove_intermittent))


def create_updater(tests, url_base="/", **kwargs):
    """Build (id_test_map, ExpectedUpdater) for the given test descriptions."""
    id_test_map = {}
    m = create_test_manifest(tests, url_base)
    reset_globals()
    id_test_map = metadata.create_test_tree(None, m)
    return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)


def create_log(entries):
    """Serialize log *entries* into a BytesIO of mozlog JSON lines.

    ``entries`` is either a list of (action, kwargs) pairs replayed through a
    StructuredLogger, or an object dumped directly as JSON.
    """
    data = BytesIO()
    if isinstance(entries, list):
        logger = structuredlog.StructuredLogger("expected_test")
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)
        for item in entries:
            action, kwargs = item
            getattr(logger, action)(**kwargs)
        logger.remove_handler(handler)
    else:
        data.write(json.dumps(entries).encode())
    data.seek(0)
    return data


def suite_log(entries, run_info=None):
    """Wrap *entries* in suite_start/suite_end actions with a run_info."""
    _run_info = default_run_info.copy()
    if run_info:
        _run_info.update(run_info)
    return ([("suite_start", {"tests": [], "run_info": _run_info})] +
            entries +
            [("suite_end", {})])


def create_test_manifest(tests, url_base="/"):
    """Build a manifest.Manifest containing the typed tests from *tests*."""
    source_files = []
    for i, (test, _, test_type, _) in enumerate(tests):
        if test_type:
            source_files.append((SourceFileWithTest(test, str(i) * 40, item_classes[test_type]),
                                 True))
    m = manifest.Manifest()
    m.update(source_files)
    return m


def test_update_0():
    """An expectation matching the default is dropped, emptying the manifest."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
                     ("test_status", {"test": "/path/to/test.htm",
                                      "subtest": "test1",
                                      "status": "PASS",
                                      "expected": "FAIL"}),
                     ("test_end", {"test": "/path/to/test.htm",
                                   "status": "OK"})])
    updated = update(tests, log)
    assert len(updated) == 1
    assert updated[0][1].is_empty


def test_update_1():
    """A changed subtest status replaces the stored expectation."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: ERROR""")]
    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id, "subtest": "test1",
                                      "status": "FAIL", "expected": "ERROR"}),
                     ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


def test_update_known_intermittent_1():
    """Mixed PASS/FAIL runs with update_intermittent yield [PASS, FAIL]."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: PASS""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


def test_update_known_intermittent_2():
    """A single consistent FAIL stays a single expectation, not a list."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: PASS""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


def test_update_existing_known_intermittent():
    """A new status is merged ahead of an existing known intermittent."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "ERROR", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "ERROR", "FAIL"]


def test_update_remove_previous_intermittent():
    """remove_intermittent drops an unobserved prior intermittent status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "ERROR", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests,
                     log_0,
                     log_1,
                     log_2,
                     update_intermittent=True,
                     remove_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "ERROR"]


def test_update_new_test_with_intermittent():
    """A brand-new test with mixed results gets an intermittent expectation."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test("test.htm") is None
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


def test_update_expected_tie_resolution():
    """With a 1-1 status tie, ordering follows the first log seen."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


def test_update_reorder_expected():
    """The most frequent status moves to the front of the expectation list."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["FAIL", "PASS"]


def test_update_and_preserve_unchanged_expected_intermittent():
    """A conditional intermittent stays intact when only another branch changes."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]
    FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "FAIL"})])
    updated = update(tests, log_0, log_1, log_2)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "FAIL"]
    assert new_manifest.get_test(test_id).get(
        "expected", default_run_info) == "PASS"


def test_update_test_with_intermittent_to_one_expected_status():
    """Without update_intermittent, a new status collapses the list to one."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "ERROR", "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "ERROR"


def test_update_intermittent_with_conditions():
    """A new status under a condition is merged into that branch's list."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "TIMEOUT",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    updated = update(tests, log_0, log_1, update_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "TIMEOUT", "FAIL"]


def test_update_and_remove_intermittent_with_conditions():
    """remove_intermittent also prunes unobserved statuses inside conditions."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "TIMEOUT",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    updated = update(tests, log_0, log_1, update_intermittent=True,
                     remove_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "TIMEOUT"]


def test_update_intermittent_full():
    """full_update keeps conditional intermittents that were observed."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT]
      FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, update_intermittent=True, full_update=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == ["FAIL", "TIMEOUT"]
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


def test_update_intermittent_full_remove():
    """full_update + remove_intermittent prunes the unobserved PASS status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT, PASS]
      FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT", "PASS"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT", "PASS"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, update_intermittent=True,
                     full_update=True, remove_intermittent=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == ["FAIL", "TIMEOUT"]
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


def test_full_update():
    """Without update_intermittent, full_update collapses lists to one status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT]
      FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


def test_full_orphan():
    """full_update removes subtests (and sub-subtests) absent from the log."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: FAIL
    [subsub test]
      expected: TIMEOUT
  [test2]
    expected: ERROR
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, full_update=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert len(new_manifest.get_test(test_id).children[0].children) == 0
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"
    assert len(new_manifest.get_test(test_id).children) == 1


def test_update_reorder_expected_full_conditions():
    """full_update reorders each conditional list by observed frequency."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT]
      [FAIL, PASS]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "mac"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "FAIL",
                                        "known_intermittent": ["PASS"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    log_3 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "FAIL",
                                        "known_intermittent": ["PASS"]}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0, log_1, log_2, log_3,
                     update_intermittent=True, full_update=True)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == ["TIMEOUT", "FAIL"]
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


def test_skip_0():
    """A run matching the stored expectation produces no update."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id, "subtest": "test1",
                                      "status": "FAIL", "expected": "FAIL"}),
                     ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log)
    assert not updated


def test_new_subtest():
    """A previously-unknown subtest is added with its observed status."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id, "subtest": "test1",
                                      "status": "FAIL", "expected": "FAIL"}),
                     ("test_status", {"test": test_id, "subtest": "test2",
                                      "status": "FAIL", "expected": "PASS"}),
                     ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
    assert new_manifest.get_test(test_id).children[1].get("expected", default_run_info) == "FAIL"


def test_update_multiple_0():
    """Diverging results across OSes produce per-OS conditional expectations."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": False, "os": "linux"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", {"debug": False, "os": "linux"}) == "TIMEOUT"


def test_update_multiple_1():
    """An unseen OS falls back to the default (FAIL) branch."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "linux"})
    run_info_3 = default_run_info.copy()
    run_info_3.update({"os": "win"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_3) == "FAIL"


def test_update_multiple_2():
    """Results split on the debug flag become debug-conditional."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": True, "os": "osx"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"


def test_update_multiple_3():
    """Existing compound conditions are updated in place."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected:
      if debug: FAIL
      if not debug and os == "osx": TIMEOUT""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "TIMEOUT", "expected": "FAIL"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": True, "os": "osx"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"


def test_update_ignore_existing():
    """Conditions not matched by any run survive a partial update."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected:
      if debug: TIMEOUT
      if not debug and os == "osx": NOTRUN""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "os": "linux"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": True, "os": "windows"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "linux"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": False, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "NOTRUN"


def test_update_new_test():
    """A test with no prior metadata gets a fresh manifest entry."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert not new_manifest.is_empty
    assert new_manifest.get_test("test.htm") is None
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"


def test_update_duplicate():
    """Contradictory results under identical run_info keep the old value."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected: ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "PASS"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "FAIL"})])
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == "ERROR"


def test_update_disable_intermittent():
    """disable_intermittent marks an unstable test disabled with the message."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected: ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "PASS"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "FAIL"})])
    updated = update(tests, log_0, log_1, disable_intermittent="Some message")
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert new_manifest.get_test(test_id).get(
        "disabled", run_info_1) == "Some message"


def test_update_stability_conditional_instability():
    """Instability on one platform disables only that platform's condition."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""
[test.htm]
  expected: ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "PASS"})],
                      run_info={"os": "linux"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "FAIL"})],
                      run_info={"os": "linux"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id, "status": "FAIL"})],
                      run_info={"os": "mac"})
    updated = update(tests, log_0, log_1, log_2, disable_intermittent="Some message")
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "linux"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "mac"})
    assert new_manifest.get_test(test_id).get(
        "disabled", run_info_1) == "Some message"
    with pytest.raises(KeyError):
        assert new_manifest.get_test(test_id).get(
            "disabled", run_info_2)
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_2) == "FAIL"


def test_update_full():
    """full_update drops sections for files no longer in the test manifest."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected:
      if debug: TIMEOUT
      if not debug and os == "osx": NOTRUN
  [test2]
    expected: FAIL
[test.js]
  [test1]
    expected: FAIL
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "ERROR", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": True})
    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "win"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test("test.js") is None
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "ERROR"


def test_update_full_unknown():
    """Conditions on unknown properties (release_or_beta) are preserved."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [test1]
    expected:
      if release_or_beta: ERROR
      if not debug and os == "osx": NOTRUN
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "release_or_beta": False})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "FAIL", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": True, "release_or_beta": False})
    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"release_or_beta": False})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"release_or_beta": True})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "ERROR"


def test_update_full_unknown_missing():
    """A deleted subtest with unknown-property conditions yields no update."""
    tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
  [subtest_deleted]
    expected:
      if release_or_beta: ERROR
      FAIL
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id, "subtest": "test1",
                                        "status": "PASS", "expected": "PASS"}),
                       ("test_end", {"test": test_id, "status": "OK"})],
                      run_info={"debug": False, "release_or_beta": False})
    updated = update(tests, log_0, full_update=True)
    assert len(updated) == 0

# NOTE(review): the scraped source is truncated mid-way through a further
# test (test_update_default, original lines 984-1003); its body is not fully
# visible here and is intentionally not reconstructed.
("test_end", {"test": test_id,1004 "status": "OK"})],1005 run_info={"os": "linux"})1006 updated = update(tests, log_0, log_1)1007 new_manifest = updated[0][1]1008 assert new_manifest.is_empty1009def test_update_default_1():1010 tests = [("path/to/test.htm", [test_id], "testharness", b"""1011[test.htm]1012 expected:1013 if os == "mac": TIMEOUT1014 ERROR""")]1015 log_0 = suite_log([("test_start", {"test": test_id}),1016 ("test_end", {"test": test_id,1017 "expected": "ERROR",1018 "status": "FAIL"})],1019 run_info={"os": "linux"})1020 updated = update(tests, log_0)1021 new_manifest = updated[0][1]1022 assert not new_manifest.is_empty1023 run_info_1 = default_run_info.copy()1024 run_info_1.update({"os": "mac"})1025 run_info_2 = default_run_info.copy()1026 run_info_2.update({"os": "win"})1027 assert not new_manifest.is_empty1028 assert new_manifest.get_test(test_id).get(1029 "expected", run_info_1) == "TIMEOUT"1030 assert new_manifest.get_test(test_id).get(1031 "expected", run_info_2) == "FAIL"1032def test_update_default_2():1033 tests = [("path/to/test.htm", [test_id], "testharness", b"""1034[test.htm]1035 expected:1036 if os == "mac": TIMEOUT1037 ERROR""")]1038 log_0 = suite_log([("test_start", {"test": test_id}),1039 ("test_end", {"test": test_id,1040 "expected": "ERROR",1041 "status": "TIMEOUT"})],1042 run_info={"os": "linux"})1043 updated = update(tests, log_0)1044 new_manifest = updated[0][1]1045 assert not new_manifest.is_empty1046 run_info_1 = default_run_info.copy()1047 run_info_1.update({"os": "mac"})1048 run_info_2 = default_run_info.copy()1049 run_info_2.update({"os": "win"})1050 assert not new_manifest.is_empty1051 assert new_manifest.get_test(test_id).get(1052 "expected", run_info_1) == "TIMEOUT"1053 assert new_manifest.get_test(test_id).get(1054 "expected", run_info_2) == "TIMEOUT"1055def test_update_assertion_count_0():1056 tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]1057 max-asserts: 41058 min-asserts: 21059""")]1060 log_0 = 
suite_log([("test_start", {"test": test_id}),1061 ("assertion_count", {"test": test_id,1062 "count": 6,1063 "min_expected": 2,1064 "max_expected": 4}),1065 ("test_end", {"test": test_id,1066 "status": "OK"})])1067 updated = update(tests, log_0)1068 new_manifest = updated[0][1]1069 assert not new_manifest.is_empty1070 assert new_manifest.get_test(test_id).get("max-asserts") == "7"1071 assert new_manifest.get_test(test_id).get("min-asserts") == "2"1072def test_update_assertion_count_1():1073 tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]1074 max-asserts: 41075 min-asserts: 21076""")]1077 log_0 = suite_log([("test_start", {"test": test_id}),1078 ("assertion_count", {"test": test_id,1079 "count": 1,1080 "min_expected": 2,1081 "max_expected": 4}),1082 ("test_end", {"test": test_id,1083 "status": "OK"})])1084 updated = update(tests, log_0)1085 new_manifest = updated[0][1]1086 assert not new_manifest.is_empty1087 assert new_manifest.get_test(test_id).get("max-asserts") == "4"1088 assert new_manifest.get_test(test_id).has_key("min-asserts") is False1089def test_update_assertion_count_2():1090 tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]1091 max-asserts: 41092 min-asserts: 21093""")]1094 log_0 = suite_log([("test_start", {"test": test_id}),1095 ("assertion_count", {"test": test_id,1096 "count": 3,1097 "min_expected": 2,1098 "max_expected": 4}),1099 ("test_end", {"test": test_id,1100 "status": "OK"})])1101 updated = update(tests, log_0)1102 assert not updated1103def test_update_assertion_count_3():1104 tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]1105 max-asserts: 41106 min-asserts: 21107""")]1108 log_0 = suite_log([("test_start", {"test": test_id}),1109 ("assertion_count", {"test": test_id,1110 "count": 6,1111 "min_expected": 2,1112 "max_expected": 4}),1113 ("test_end", {"test": test_id,1114 "status": "OK"})],1115 run_info={"os": "windows"})1116 log_1 = suite_log([("test_start", {"test": 
test_id}),1117 ("assertion_count", {"test": test_id,1118 "count": 7,1119 "min_expected": 2,1120 "max_expected": 4}),1121 ("test_end", {"test": test_id,1122 "status": "OK"})],1123 run_info={"os": "linux"})1124 updated = update(tests, log_0, log_1)1125 new_manifest = updated[0][1]1126 assert not new_manifest.is_empty1127 assert new_manifest.get_test(test_id).get("max-asserts") == "8"1128 assert new_manifest.get_test(test_id).get("min-asserts") == "2"1129def test_update_assertion_count_4():1130 tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]""")]1131 log_0 = suite_log([("test_start", {"test": test_id}),1132 ("assertion_count", {"test": test_id,1133 "count": 6,1134 "min_expected": 0,1135 "max_expected": 0}),1136 ("test_end", {"test": test_id,1137 "status": "OK"})],1138 run_info={"os": "windows"})1139 log_1 = suite_log([("test_start", {"test": test_id}),1140 ("assertion_count", {"test": test_id,1141 "count": 7,1142 "min_expected": 0,1143 "max_expected": 0}),1144 ("test_end", {"test": test_id,1145 "status": "OK"})],1146 run_info={"os": "linux"})1147 updated = update(tests, log_0, log_1)1148 new_manifest = updated[0][1]1149 assert not new_manifest.is_empty1150 assert new_manifest.get_test(test_id).get("max-asserts") == "8"1151 assert new_manifest.get_test(test_id).has_key("min-asserts") is False1152def test_update_lsan_0():1153 tests = [("path/to/test.htm", [test_id], "testharness", b""),1154 ("path/to/__dir__", [dir_id], None, b"")]1155 log_0 = suite_log([("lsan_leak", {"scope": "path/to/",1156 "frames": ["foo", "bar"]})])1157 updated = update(tests, log_0)1158 new_manifest = updated[0][1]1159 assert not new_manifest.is_empty1160 assert new_manifest.get("lsan-allowed") == ["foo"]1161def test_update_lsan_1():1162 tests = [("path/to/test.htm", [test_id], "testharness", b""),1163 ("path/to/__dir__", [dir_id], None, b"""1164lsan-allowed: [foo]""")]1165 log_0 = suite_log([("lsan_leak", {"scope": "path/to/",1166 "frames": ["foo", "bar"]}),1167 
("lsan_leak", {"scope": "path/to/",1168 "frames": ["baz", "foobar"]})])1169 updated = update(tests, log_0)1170 new_manifest = updated[0][1]1171 assert not new_manifest.is_empty1172 assert new_manifest.get("lsan-allowed") == ["baz", "foo"]1173def test_update_lsan_2():1174 tests = [("path/to/test.htm", [test_id], "testharness", b""),1175 ("path/__dir__", ["path/__dir__"], None, b"""1176lsan-allowed: [foo]"""),1177 ("path/to/__dir__", [dir_id], None, b"")]1178 log_0 = suite_log([("lsan_leak", {"scope": "path/to/",1179 "frames": ["foo", "bar"],1180 "allowed_match": ["foo"]}),1181 ("lsan_leak", {"scope": "path/to/",1182 "frames": ["baz", "foobar"]})])1183 updated = update(tests, log_0)1184 new_manifest = updated[0][1]1185 assert not new_manifest.is_empty1186 assert new_manifest.get("lsan-allowed") == ["baz"]1187def test_update_lsan_3():1188 tests = [("path/to/test.htm", [test_id], "testharness", b""),1189 ("path/to/__dir__", [dir_id], None, b"")]1190 log_0 = suite_log([("lsan_leak", {"scope": "path/to/",1191 "frames": ["foo", "bar"]})],1192 run_info={"os": "win"})1193 log_1 = suite_log([("lsan_leak", {"scope": "path/to/",1194 "frames": ["baz", "foobar"]})],1195 run_info={"os": "linux"})1196 updated = update(tests, log_0, log_1)1197 new_manifest = updated[0][1]1198 assert not new_manifest.is_empty1199 assert new_manifest.get("lsan-allowed") == ["baz", "foo"]1200def test_update_wptreport_0():1201 tests = [("path/to/test.htm", [test_id], "testharness",1202 b"""[test.htm]1203 [test1]1204 expected: FAIL""")]1205 log = {"run_info": default_run_info.copy(),1206 "results": [1207 {"test": "/path/to/test.htm",1208 "subtests": [{"name": "test1",1209 "status": "PASS",1210 "expected": "FAIL"}],1211 "status": "OK"}1212 ]}1213 updated = update(tests, log)1214 assert len(updated) == 11215 assert updated[0][1].is_empty1216def test_update_wptreport_1():1217 tests = [("path/to/test.htm", [test_id], "testharness", b""),1218 ("path/to/__dir__", [dir_id], None, b"")]1219 log = {"run_info": 
default_run_info.copy(),1220 "results": [],1221 "lsan_leaks": [{"scope": "path/to/",1222 "frames": ["baz", "foobar"]}]}1223 updated = update(tests, log)1224 assert len(updated) == 11225 assert updated[0][1].get("lsan-allowed") == ["baz"]1226def test_update_leak_total_0():1227 tests = [("path/to/test.htm", [test_id], "testharness", b""),1228 ("path/to/__dir__", [dir_id], None, b"")]1229 log_0 = suite_log([("mozleak_total", {"scope": "path/to/",1230 "process": "default",1231 "bytes": 100,1232 "threshold": 0,1233 "objects": []})])1234 updated = update(tests, log_0)1235 new_manifest = updated[0][1]1236 assert not new_manifest.is_empty1237 assert new_manifest.get("leak-threshold") == ['default:51200']1238def test_update_leak_total_1():1239 tests = [("path/to/test.htm", [test_id], "testharness", b""),1240 ("path/to/__dir__", [dir_id], None, b"")]1241 log_0 = suite_log([("mozleak_total", {"scope": "path/to/",1242 "process": "default",1243 "bytes": 100,1244 "threshold": 1000,1245 "objects": []})])1246 updated = update(tests, log_0)1247 assert not updated1248def test_update_leak_total_2():1249 tests = [("path/to/test.htm", [test_id], "testharness", b""),1250 ("path/to/__dir__", [dir_id], None, b"""1251leak-total: 110""")]1252 log_0 = suite_log([("mozleak_total", {"scope": "path/to/",1253 "process": "default",1254 "bytes": 100,1255 "threshold": 110,1256 "objects": []})])1257 updated = update(tests, log_0)1258 assert not updated1259def test_update_leak_total_3():1260 tests = [("path/to/test.htm", [test_id], "testharness", b""),1261 ("path/to/__dir__", [dir_id], None, b"""1262leak-total: 100""")]1263 log_0 = suite_log([("mozleak_total", {"scope": "path/to/",1264 "process": "default",1265 "bytes": 1000,1266 "threshold": 100,1267 "objects": []})])1268 updated = update(tests, log_0)1269 new_manifest = updated[0][1]1270 assert not new_manifest.is_empty1271 assert new_manifest.get("leak-threshold") == ['default:51200']1272def test_update_leak_total_4():1273 tests = 
[("path/to/test.htm", [test_id], "testharness", b""),1274 ("path/to/__dir__", [dir_id], None, b"""1275leak-total: 110""")]1276 log_0 = suite_log([1277 ("lsan_leak", {"scope": "path/to/",1278 "frames": ["foo", "bar"]}),1279 ("mozleak_total", {"scope": "path/to/",1280 "process": "default",1281 "bytes": 100,1282 "threshold": 110,1283 "objects": []})])1284 updated = update(tests, log_0)1285 new_manifest = updated[0][1]1286 assert not new_manifest.is_empty1287 assert new_manifest.has_key("leak-threshold") is False1288class TestStep(Step):1289 def create(self, state):1290 tests = [("path/to/test.htm", [test_id], "testharness", "")]1291 state.foo = create_test_manifest(tests)1292class UpdateRunner(StepRunner):1293 steps = [TestStep]1294def test_update_pickle():1295 logger = structuredlog.StructuredLogger("expected_test")1296 args = {1297 "test_paths": {1298 "/": {"tests_path": os.path.abspath(os.path.join(here,1299 os.pardir,1300 os.pardir,1301 os.pardir,1302 os.pardir))},1303 },1304 "abort": False,1305 "continue": False,1306 "sync": False,1307 }1308 args2 = args.copy()1309 args2["abort"] = True1310 wptupdate = WPTUpdate(logger, **args2)1311 wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)...

Full Screen

Full Screen

unittestpy.py

Source:unittestpy.py Github

copy

Full Screen

...22 self.subtest_failures = {}23 self.messages = TeamcityServiceMessages(_real_stdout)24 self.current_test_id = None25 @staticmethod26 def get_test_id(test):27 if is_string(test):28 return test29 test_class_fullname = get_class_fullname(test)30 test_id = test.id()31 if test_class_fullname in _ERROR_HOLDERS_FQN:32 # patch setUpModule (__main__) -> __main__.setUpModule33 return re.sub(r'^(.*) \((.*)\)$', r'\2.\1', test_id)34 # Force test_id for doctests35 if test_class_fullname != "doctest.DocTestCase":36 desc = test.shortDescription()37 test_method_name = getattr(test, "_testMethodName", "")38 if desc and desc != test_id and desc != test_method_name:39 return "%s (%s)" % (test_id, desc.replace('.', '_'))40 return test_id41 def addSuccess(self, test):42 super(TeamcityTestResult, self).addSuccess(test)43 def addExpectedFailure(self, test, err):44 _super = super(TeamcityTestResult, self)45 if hasattr(_super, "addExpectedFailure"):46 _super.addExpectedFailure(test, err)47 err = convert_error_to_string(err)48 test_id = self.get_test_id(test)49 self.messages.testIgnored(test_id, message="Expected failure: " + err, flowId=test_id)50 def get_subtest_block_id(self, test, subtest):51 test_id = self.get_test_id(test)52 subtest_id = self.get_test_id(subtest)53 if subtest_id.startswith(test_id):54 block_id = subtest_id[len(test_id):].strip()55 else:56 block_id = subtest_id57 if len(block_id) == 0:58 block_id = test_id59 return block_id60 def addSkip(self, test, reason=""):61 if sys.version_info >= (2, 7):62 super(TeamcityTestResult, self).addSkip(test, reason)63 if reason:64 if isinstance(reason, Exception):65 reason_str = ": " + get_exception_message(reason)66 else:67 reason_str = ": " + to_unicode(reason)68 else:69 reason_str = ""70 test_class_name = get_class_fullname(test)71 if test_class_name == "unittest.case._SubTest" or test_class_name == "unittest2.case._SubTest":72 parent_test = test.test_case73 parent_test_id = self.get_test_id(parent_test)74 subtest = test75 
block_id = self.get_subtest_block_id(parent_test, subtest)76 self.messages.subTestBlockOpened(block_id, subTestResult="Skip", flowId=parent_test_id)77 self.messages.testStdOut(parent_test_id, out="SubTest skipped" + reason_str + "\n", flowId=parent_test_id)78 self.messages.blockClosed(block_id, flowId=parent_test_id)79 else:80 test_id = self.get_test_id(test)81 if test_id not in self.test_started_datetime_map:82 # Test ignored without startTest. Handle start and finish events ourselves83 self.messages.testStarted(test_id, flowId=test_id)84 self.messages.testIgnored(test_id, message="Skipped" + reason_str, flowId=test_id)85 self.messages.testFinished(test_id, flowId=test_id)86 else:87 self.messages.testIgnored(test_id, message="Skipped" + reason_str, flowId=test_id)88 def addUnexpectedSuccess(self, test):89 _super = super(TeamcityTestResult, self)90 if hasattr(_super, "addUnexpectedSuccess"):91 _super.addUnexpectedSuccess(test)92 test_id = self.get_test_id(test)93 self.messages.testFailed(test_id, message='Failure',94 details="Test should not succeed since it's marked with @unittest.expectedFailure",95 flowId=test_id)96 def addError(self, test, err, *k):97 super(TeamcityTestResult, self).addError(test, err)98 test_class = get_class_fullname(test)99 if test_class in _ERROR_HOLDERS_FQN:100 # This is a standalone error101 test_id = self.get_test_id(test)102 self.messages.testStarted(test_id, flowId=test_id)103 self.report_fail(test, 'Failure', err)104 self.messages.testFinished(test_id, flowId=test_id)105 elif get_class_fullname(err[0]) == "unittest2.case.SkipTest":106 message = ""107 if hasattr(err[1], "message"):108 message = getattr(err[1], "message", "")109 elif hasattr(err[1], "args"):110 message = getattr(err[1], "args", [""])[0]111 self.addSkip(test, message)112 else:113 self.report_fail(test, 'Error', err)114 def addFailure(self, test, err, *k):115 super(TeamcityTestResult, self).addFailure(test, err)116 self.report_fail(test, 'Failure', err)117 def 
addSubTest(self, test, subtest, err):118 _super = super(TeamcityTestResult, self)119 if hasattr(_super, "addSubTest"):120 _super.addSubTest(test, subtest, err)121 test_id = self.get_test_id(test)122 subtest_id = self.get_test_id(subtest)123 if subtest_id.startswith(test_id):124 # Replace "." -> "_" since '.' is a test hierarchy separator125 # See i.e. https://github.com/JetBrains/teamcity-messages/issues/134 (https://youtrack.jetbrains.com/issue/PY-23846)126 block_id = subtest_id[len(test_id):].strip().replace(".", "_")127 else:128 block_id = subtest_id129 if len(block_id) == 0:130 block_id = subtest_id131 if err is not None:132 self.add_subtest_failure(test_id, block_id)133 if issubclass(err[0], test.failureException):134 self.messages.subTestBlockOpened(block_id, subTestResult="Failure", flowId=test_id)135 self.messages.testStdErr(test_id, out="SubTest failure: %s\n" % convert_error_to_string(err), flowId=test_id)136 self.messages.blockClosed(block_id, flowId=test_id)137 else:138 self.messages.subTestBlockOpened(block_id, subTestResult="Error", flowId=test_id)139 self.messages.testStdErr(test_id, out="SubTest error: %s\n" % convert_error_to_string(err), flowId=test_id)140 self.messages.blockClosed(block_id, flowId=test_id)141 else:142 self.messages.subTestBlockOpened(block_id, subTestResult="Success", flowId=test_id)143 self.messages.blockClosed(block_id, flowId=test_id)144 def add_subtest_failure(self, test_id, subtest_block_id):145 fail_array = self.subtest_failures.get(test_id, [])146 fail_array.append(subtest_block_id)147 self.subtest_failures[test_id] = fail_array148 def get_subtest_failure(self, test_id):149 fail_array = self.subtest_failures.get(test_id, [])150 return ", ".join(fail_array)151 def report_fail(self, test, fail_type, err):152 test_id = self.get_test_id(test)153 diff_failed = None154 try:155 error = err[1]156 if isinstance(error, EqualsAssertionError):157 diff_failed = error158 except Exception:159 pass160 if is_string(err):161 details = 
err162 elif get_class_fullname(err) == "twisted.python.failure.Failure":163 details = err.getTraceback()164 else:165 frames_to_skip_from_tail = 2 if diff_failed else 0166 details = convert_error_to_string(err, frames_to_skip_from_tail)167 subtest_failures = self.get_subtest_failure(test_id)168 if subtest_failures:169 details = "Failed subtests list: " + subtest_failures + "\n\n" + details.strip()170 details = details.strip()171 if diff_failed:172 self.messages.testFailed(test_id,173 message=diff_failed.msg,174 details=details,175 flowId=test_id,176 comparison_failure=diff_failed)177 else:178 self.messages.testFailed(test_id, message=fail_type, details=details, flowId=test_id)179 self.failed_tests.add(test_id)180 def startTest(self, test):181 test_id = self.get_test_id(test)182 self.current_test_id = test_id183 super(TeamcityTestResult, self).startTest(test)184 self.test_started_datetime_map[test_id] = datetime.datetime.now()185 self.messages.testStarted(test_id, captureStandardOutput='true', flowId=test_id)186 def _dump_test_stderr(self, data):187 if self.current_test_id is not None:188 dump_test_stderr(self.messages, self.current_test_id, self.current_test_id, data)189 else:190 _real_stderr.write(data)191 def _dump_test_stdout(self, data):192 if self.current_test_id is not None:193 dump_test_stdout(self.messages, self.current_test_id, self.current_test_id, data)194 else:195 _real_stdout.write(data)196 def _setupStdout(self):197 if getattr(self, 'buffer', None):198 self._stderr_buffer = FlushingStringIO(self._dump_test_stderr)199 self._stdout_buffer = FlushingStringIO(self._dump_test_stdout)200 sys.stdout = self._stdout_buffer201 sys.stderr = self._stderr_buffer202 def stopTest(self, test):203 test_id = self.get_test_id(test)204 if getattr(self, 'buffer', None):205 # Do not allow super() method to print output by itself206 self._mirrorOutput = False207 output = sys.stdout.getvalue()208 if output:209 dump_test_stdout(self.messages, test_id, test_id, output)210 error 
= sys.stderr.getvalue()211 if error:212 dump_test_stderr(self.messages, test_id, test_id, error)213 super(TeamcityTestResult, self).stopTest(test)214 self.current_test_id = None215 if test_id not in self.failed_tests:216 subtest_failures = self.get_subtest_failure(test_id)217 if subtest_failures:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test-automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful