How to use mock_ut1 method in lisa

Best Python code snippets using lisa_python

The snippets below come from the lisa project's selftests: test_testsuite.py defines the mock_ut1 test case, while test_lisa_runner.py and test_testselector.py show how the runner and the test selector exercise it.

test_lisa_runner.py

Source: test_lisa_runner.py (GitHub)


# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional, Union, cast
from unittest import TestCase

import lisa
from lisa import LisaException, constants, schema
from lisa.environment import EnvironmentStatus, load_environments
from lisa.messages import TestResultMessage, TestStatus
from lisa.notifier import register_notifier
from lisa.runner import RunnerResult
from lisa.runners.lisa_runner import LisaRunner
from lisa.testsuite import TestResult, simple_requirement
from lisa.util.parallel import Task
from selftests import test_platform, test_testsuite
from selftests.test_environment import generate_runbook as generate_env_runbook


def generate_runner(
    env_runbook: Optional[schema.EnvironmentRoot] = None,
    case_use_new_env: bool = False,
    times: int = 1,
    platform_schema: Optional[test_platform.MockPlatformSchema] = None,
) -> LisaRunner:
    platform_runbook = schema.Platform(
        type=constants.PLATFORM_MOCK, admin_password="do-not-use"
    )
    if platform_schema:
        platform_runbook.extended_schemas = {
            constants.PLATFORM_MOCK: platform_schema.to_dict()  # type: ignore
        }
    runbook = schema.Runbook(
        platform=[platform_runbook],
    )
    runbook.testcase = [
        schema.TestCase(
            criteria=schema.Criteria(priority=[0, 1, 2]),
            use_new_environment=case_use_new_env,
            times=times,
        )
    ]
    runbook.wait_resource_timeout = 0
    if env_runbook:
        runbook.environment = env_runbook
    runner = LisaRunner(runbook, 0, {})
    return runner


class RunnerTestCase(TestCase):
    __skipped_no_env = "no available environment"

    def setUp(self) -> None:
        lisa.environment._global_environment_id = 0

    def tearDown(self) -> None:
        test_testsuite.cleanup_cases_metadata()  # Necessary side effects!

    def test_merge_req_create_on_new(self) -> None:
        # if there are no predefined envs, environments can be generated
        # from the requirements
        env_runbook = generate_env_runbook(is_single_env=False)
        envs = load_environments(env_runbook)
        self.assertListEqual(
            [],
            [x for x in envs],
        )
        runner = generate_runner(None)
        test_results = test_testsuite.generate_cases_result()
        runner._merge_test_requirements(
            test_results=test_results,
            existing_environments=envs,
            platform_type=constants.PLATFORM_MOCK,
        )
        # 3 cases create 3 environments.
        self.assertListEqual(
            ["generated_0", "generated_1", "generated_2"],
            list(envs),
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "", ""],
            expected_status=[TestStatus.QUEUED, TestStatus.QUEUED, TestStatus.QUEUED],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_merge_req(self) -> None:
        # each test case creates an environment candidate.
        env_runbook = generate_env_runbook(remote=True)
        envs = load_environments(env_runbook)
        self.assertListEqual(
            ["customized_0"],
            list(envs),
        )
        runner = generate_runner(env_runbook)
        test_results = test_testsuite.generate_cases_result()
        runner._merge_test_requirements(
            test_results=test_results,
            existing_environments=envs,
            platform_type=constants.PLATFORM_MOCK,
        )
        self.assertListEqual(
            ["customized_0", "generated_1", "generated_2", "generated_3"],
            list(envs),
        )
        self.assertListEqual(
            [
                TestStatus.QUEUED,
                TestStatus.QUEUED,
                TestStatus.QUEUED,
            ],
            [x.status for x in test_results],
        )

    def test_merge_req_create_on_use_new(self) -> None:
        # same runbook as test_merge_req_run_not_create_on_equal,
        # but all 3 cases ask for a new env, so 3 envs are created.
        # note: when running cases, a predefined env is treated as a new env.
        env_runbook = generate_env_runbook(remote=True)
        envs = load_environments(env_runbook)
        self.assertListEqual(
            ["customized_0"],
            list(envs),
        )
        runner = generate_runner(env_runbook)
        test_results = test_testsuite.generate_cases_result()
        for test_result in test_results:
            test_result.runtime_data.use_new_environment = True
        runner._merge_test_requirements(
            test_results=test_results,
            existing_environments=envs,
            platform_type=constants.PLATFORM_MOCK,
        )
        # All 3 cases needed a new environment, so it created 3.
        self.assertListEqual(
            ["customized_0", "generated_1", "generated_2", "generated_3"],
            list(envs),
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "", ""],
            expected_status=[TestStatus.QUEUED, TestStatus.QUEUED, TestStatus.QUEUED],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_merge_req_all_generated(self) -> None:
        # force use of existing envs instead of creating new ones.
        # this case doesn't provide a predefined env, but no case is
        # skipped at this stage.
        env_runbook = generate_env_runbook(is_single_env=False)
        envs = load_environments(env_runbook)
        self.assertListEqual(
            [],
            list(envs),
        )
        runner = generate_runner(None)
        test_results = test_testsuite.generate_cases_result()
        runner._merge_test_requirements(
            test_results=test_results,
            existing_environments=envs,
            platform_type=constants.PLATFORM_MOCK,
        )
        self.assertListEqual(
            ["generated_0", "generated_1", "generated_2"],
            list(envs),
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "", ""],
            expected_status=[
                TestStatus.QUEUED,
                TestStatus.QUEUED,
                TestStatus.QUEUED,
            ],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_merge_req_platform_type_checked(self) -> None:
        # check whether the current platform is supported;
        # for example, some cases run on Azure only.
        # the platform check happens in the runner, so the case lives here.
        # a simple check is enough; more is covered by search_space.SetSpace.
        env_runbook = generate_env_runbook(is_single_env=False)
        envs = load_environments(env_runbook)
        self.assertListEqual(
            [],
            list(envs),
        )
        runner = generate_runner(None)
        test_results = test_testsuite.generate_cases_result()
        for test_result in test_results:
            metadata = test_result.runtime_data.metadata
            metadata.requirement = simple_requirement(
                supported_platform_type=["does-not-exist"]
            )
        runner._merge_test_requirements(
            test_results=test_results,
            existing_environments=envs,
            platform_type=constants.PLATFORM_MOCK,
        )
        platform_unsupported = "capability cannot support some of requirement"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "", ""],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
            ],
            expected_message=[
                platform_unsupported,
                platform_unsupported,
                platform_unsupported,
            ],
            test_results=test_results,
        )

    def test_fit_a_predefined_env(self) -> None:
        # a predefined env can run cases under the conditions below:
        # 1. the predefined env has 1 simple node, so ut2 doesn't need a new env.
        # 2. ut3 needs 8 cores, and the predefined env is targeted to meet all
        #    core requirements, so it can run any case with core requirements.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_fit_a_bigger_env(self) -> None:
        # similar to test_fit_a_predefined_env, but 2 nodes are predefined;
        # the env doesn't equal any case requirement, yet is reusable for all cases.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_customized(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but every case wants to run on a new env.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, case_use_new_env=True)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "", ""],
            expected_status=[TestStatus.PASSED, TestStatus.SKIPPED, TestStatus.SKIPPED],
            expected_message=["", self.__skipped_no_env, self.__skipped_no_env],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_generated(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but every case wants to run on a new env.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, case_use_new_env=True, times=2)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deleted_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=[
                "mock_ut1",
                "mock_ut1",
                "mock_ut2",
                "mock_ut2",
                "mock_ut3",
                "mock_ut3",
            ],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_status=[
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
            ],
            expected_message=["", "", "", "", "", ""],
            test_results=test_results,
        )

    def test_no_needed_env(self) -> None:
        # two 1-node envs are predefined, but only customized_0 goes to deploy;
        # no cases are assigned to customized_1, as fitting cases already run
        # on customized_0.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "customized_0",
                "customized_1",
            ],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_deploy_no_more_resource(self) -> None:
        # the platform may run out of resources, e.g. no Azure quota,
        # and cases are skipped because of it.
        # In the future, a retry waiting for more resources will be added.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.wait_more_resource_error = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=[],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        no_awaitable_resource_message = "deployment skipped: awaitable resource"
        no_more_resource_message = "no available environment"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", ""],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
            ],
            expected_message=[
                no_more_resource_message,
                no_awaitable_resource_message,
                no_more_resource_message,
            ],
            test_results=test_results,
        )

    def test_skipped_on_suite_failure(self) -> None:
        # the first two tests are skipped because the setup is made to fail.
        test_testsuite.fail_on_before_suite = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        before_suite_failed = "before_suite: failed"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.PASSED,
            ],
            expected_message=[before_suite_failed, before_suite_failed, ""],
            test_results=test_results,
        )

    def test_env_failed_not_prepared_env(self) -> None:
        # the test env is not prepared, so test cases cannot find an env to run.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        test_testsuite.generate_cases_metadata()
        runner = generate_runner(None, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[],
            expected_deleted_envs=[],
            runner=runner,
        )
        no_available_env = (
            "deployment failed. LisaException: no capability found for environment: "
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_status=[
                TestStatus.FAILED,
                TestStatus.FAILED,
                TestStatus.FAILED,
            ],
            expected_message=[
                no_available_env,
                no_available_env,
                no_available_env,
            ],
            test_results=test_results,
        )

    def test_env_failed_more_failed_env_on_prepare(self) -> None:
        # the test env is not prepared, so test cases cannot find an env to run.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        with self.assertRaises(LisaException) as cm:
            _ = self._run_all_tests(runner)
        self.assertIn(
            "There are no remaining test results to run, ",
            str(cm.exception),
        )

    def test_env_deploy_failed(self) -> None:
        # the env is prepared, but deployment failed, so the cases failed.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.deployed_status = EnvironmentStatus.Prepared
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deleted_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            runner=runner,
        )
        no_available_env = (
            "deployment failed. LisaException: "
            "expected status is EnvironmentStatus.Prepared"
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["generated_0", "generated_1", "generated_2"],
            expected_status=[
                TestStatus.FAILED,
                TestStatus.FAILED,
                TestStatus.FAILED,
            ],
            expected_message=[no_available_env, no_available_env, no_available_env],
            test_results=test_results,
        )

    def test_env_skipped_no_case(self) -> None:
        # no case is found, because generate_cases_metadata is not called;
        # in this situation no env is deployed.
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        # the predefined env is still prepared, but not deployed.
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=[],
            expected_deleted_envs=[],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=[],
            expected_envs=[],
            expected_status=[],
            expected_message=[],
            test_results=test_results,
        )

    def verify_test_results(
        self,
        expected_test_order: List[str],
        expected_envs: List[str],
        expected_status: List[TestStatus],
        expected_message: List[str],
        test_results: Union[List[TestResultMessage], List[TestResult]],
    ) -> None:
        test_names: List[str] = []
        env_names: List[str] = []
        for test_result in test_results:
            if isinstance(test_result, TestResult):
                test_names.append(test_result.runtime_data.metadata.name)
                env_names.append(
                    test_result.environment.name
                    if test_result.environment is not None
                    else ""
                )
            else:
                assert isinstance(test_result, TestResultMessage)
                test_names.append(test_result.full_name.split(".")[1])
                env_names.append(test_result.information.get("environment", ""))
        self.assertListEqual(
            expected_test_order,
            test_names,
            "test order inconsistent",
        )
        self.assertListEqual(
            expected_envs,
            env_names,
            "test env inconsistent",
        )
        self.assertListEqual(
            expected_status,
            [x.status for x in test_results],
            "test result inconsistent",
        )
        # compare only the beginning of each message
        actual_messages = [
            test_results[index].message[0 : len(expected)]
            for index, expected in enumerate(expected_message)
        ]
        self.assertListEqual(
            expected_message,
            actual_messages,
            "test message inconsistent",
        )

    def verify_env_results(
        self,
        expected_prepared: List[str],
        expected_deployed_envs: List[str],
        expected_deleted_envs: List[str],
        runner: LisaRunner,
    ) -> None:
        platform = cast(test_platform.MockPlatform, runner.platform)
        platform_test_data = platform.test_data
        self.assertListEqual(
            expected_prepared,
            list(platform_test_data.prepared_envs),
            "prepared envs inconsistent",
        )
        self.assertListEqual(
            expected_deployed_envs,
            list(platform_test_data.deployed_envs),
            "deployed envs inconsistent",
        )
        self.assertListEqual(
            expected_deleted_envs,
            list(platform_test_data.deleted_envs),
            "deleted envs inconsistent",
        )

    def _run_all_tests(self, runner: LisaRunner) -> List[TestResultMessage]:
        results_collector = RunnerResult(schema.Notifier())
        register_notifier(results_collector)
        runner.initialize()
        while not runner.is_done:
            task = runner.fetch_task()
            if task:
                if isinstance(task, Task):
                    task()
...


test_testsuite.py

Source: test_testsuite.py (GitHub)


...
            raise LisaException("failed")

    def after_case(self, log: Logger, **kwargs: Any) -> None:
        if self.fail_on_after_case:
            raise LisaException("failed")

    def mock_ut1(self, *args: Any, **kwargs: Any) -> None:
        if self.partial_pass:
            raise PassedException("mock_ut1 passed with warning")
        if self.skipped:
            raise SkippedException("mock_ut1 skipped this run")
        while self.fail_case_count > 0:
            self.fail_case_count -= 1
            raise LisaException("mock_ut1 failed")

    def mock_ut2(self, variables: Dict[str, Any], **kwargs: Any) -> None:
        if self.check_variable:
            assert_that(variables).described_as("variable must exists").contains_entry(
                {"var": 1}
            )
        else:
            assert_that(variables).described_as("variable must not exists").is_empty()
...
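These mock branches are driven from the other selftests rather than from inside the suite itself. A minimal sketch of that pattern, reusing only names that appear in the runner snippet above (treat it as an illustration of how the selftests wire things up, not a public lisa API):

from selftests import test_testsuite

# flag used by test_lisa_runner.py's test_skipped_on_suite_failure;
# it makes before_suite fail, so mock_ut1 and mock_ut2 end up SKIPPED
test_testsuite.fail_on_before_suite = True

# presumably registers the mock cases (mock_ut1, mock_ut2, mock_ut3) for the run
test_testsuite.generate_cases_metadata()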


test_testselector.py

Source: test_testselector.py (GitHub)


# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from unittest import TestCase

from lisa import LisaException, constants
from selftests.test_testsuite import cleanup_cases_metadata, select_and_check


class SelectorTestCase(TestCase):
    def setUp(self) -> None:
        cleanup_cases_metadata()

    def test_no_case_selected(self) -> None:
        runbook = [{constants.TESTCASE_CRITERIA: {"area": "demo"}}]
        select_and_check(self, runbook, [])

    def test_select_by_priority(self) -> None:
        runbook = [{constants.TESTCASE_CRITERIA: {"priority": 0}}]
        select_and_check(self, runbook, ["ut1"])

    def test_select_by_tag(self) -> None:
        runbook = [{constants.TESTCASE_CRITERIA: {"tags": "t1"}}]
        select_and_check(self, runbook, ["ut1", "ut2"])

    def test_select_by_one_of_tag(self) -> None:
        runbook = [{constants.TESTCASE_CRITERIA: {"tags": ["t1", "t3"]}}]
        select_and_check(self, runbook, ["ut1", "ut2", "ut3"])

    def test_select_by_two_rules(self) -> None:
        runbook = [{constants.TESTCASE_CRITERIA: {"tags": ["t1", "t3"], "area": "a1"}}]
        select_and_check(self, runbook, ["ut1", "ut2"])

    def test_select_by_two_criteria(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"name": "mock_ut1"}},
            {constants.TESTCASE_CRITERIA: {"name": "mock_ut2"}},
        ]
        select_and_check(self, runbook, ["ut1", "ut2"])

    def test_select_then_drop(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}},
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                constants.TESTCASE_SELECT_ACTION: "exclude",
            },
        ]
        select_and_check(self, runbook, ["ut1"])

    def test_select_drop_select(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}},
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                constants.TESTCASE_SELECT_ACTION: "exclude",
            },
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}},
        ]
        select_and_check(self, runbook, ["ut1", "ut2"])

    def test_select_force_include(self) -> None:
        runbook = [
            {
                constants.TESTCASE_CRITERIA: {"tags": "t1"},
                constants.TESTCASE_SELECT_ACTION: "forceInclude",
            },
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                constants.TESTCASE_SELECT_ACTION: "exclude",
            },
        ]
        select_and_check(self, runbook, ["ut1", "ut2"])

    def test_select_force_conflict(self) -> None:
        runbook = [
            {
                constants.TESTCASE_CRITERIA: {"tags": "t1"},
                constants.TESTCASE_SELECT_ACTION: "forceInclude",
            },
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                constants.TESTCASE_SELECT_ACTION: "forceExclude",
            },
        ]
        with self.assertRaises(LisaException) as cm:
            select_and_check(self, runbook, ["ut1", "ut2"])
        self.assertIsInstance(cm.exception, LisaException)
        self.assertIn("force", str(cm.exception))

    def test_select_force_conflict_exclude(self) -> None:
        runbook = [
            {
                constants.TESTCASE_CRITERIA: {"tags": "t1"},
                constants.TESTCASE_SELECT_ACTION: "include",
            },
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                constants.TESTCASE_SELECT_ACTION: "forceExclude",
            },
            {
                constants.TESTCASE_CRITERIA: {"tags": "t1"},
                constants.TESTCASE_SELECT_ACTION: "forceInclude",
            },
        ]
        with self.assertRaises(LisaException) as cm:
            select_and_check(self, runbook, [])
        self.assertIsInstance(cm.exception, LisaException)
        self.assertIn("force", str(cm.exception))

    def test_select_with_setting(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}, "retry": 2},
        ]
        selected = select_and_check(self, runbook, ["ut1", "ut2"])
        self.assertListEqual([2, 2], [case.retry for case in selected])

    def test_select_with_times(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}},
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                "times": 2,
                constants.TESTCASE_SELECT_ACTION: "none",
            },
        ]
        selected = select_and_check(self, runbook, ["ut1", "ut2", "ut2"])
        self.assertListEqual([1, 2, 2], [case.times for case in selected])
        self.assertListEqual([0, 0, 0], [case.retry for case in selected])

    def test_select_with_setting_none(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}},
            {
                constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
                "retry": 2,
                constants.TESTCASE_SELECT_ACTION: "none",
            },
        ]
        selected = select_and_check(self, runbook, ["ut1", "ut2"])
        self.assertListEqual([0, 2], [case.retry for case in selected])

    def test_select_with_diff_setting(self) -> None:
        runbook = [
            {constants.TESTCASE_CRITERIA: {"tags": "t1"}, "retry": 2},
            {constants.TESTCASE_CRITERIA: {"name": "mock_ut2"}, "retry": 3},
        ]
        selected = select_and_check(self, runbook, ["ut1", "ut2"])
...
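Putting the selector pieces together, here is a minimal, self-contained sketch that selects only mock_ut1 by name. It mirrors the setUp/select_and_check pattern of SelectorTestCase above; the helpers come from selftests.test_testsuite as shown in the imports, and the expected short name "ut1" follows the naming used by those tests (the single-criterion case is not shown verbatim above, so treat the expectation as an assumption):

from unittest import TestCase

from lisa import constants
from selftests.test_testsuite import cleanup_cases_metadata, select_and_check


class SelectMockUt1Example(TestCase):
    def setUp(self) -> None:
        # reset case metadata between tests, as SelectorTestCase does
        cleanup_cases_metadata()

    def test_select_only_mock_ut1(self) -> None:
        # a single criteria entry matching mock_ut1 by name;
        # select_and_check asserts the selected names equal ["ut1"]
        runbook = [{constants.TESTCASE_CRITERIA: {"name": "mock_ut1"}}]
        select_and_check(self, runbook, ["ut1"])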


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lisa automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

