How to use the generate_cases_metadata method in lisa

Best Python code snippet using lisa_python
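
In LISA's own unit tests, generate_cases_metadata is a helper defined in test_testsuite.py (second snippet below). It builds TestCaseMetadata for three mock cases, binds them to two mock suites, and returns the metadata list; other test modules such as test_lisa_runner.py call it to populate the global case registry before building a runner. The sketch below shows that minimal usage, assuming test_testsuite is importable from LISA's test sources; it mirrors the assertions made in test_expanded_nodespace further down.

from unittest import TestCase

import test_testsuite  # LISA test helper module; the import path is an assumption


class GenerateCasesMetadataExample(TestCase):
    def setUp(self) -> None:
        # case/suite registration is process-global, so clear it between tests
        test_testsuite.cleanup_cases_metadata()

    def test_register_mock_cases(self) -> None:
        # registers ut1/ut2/ut3 against the mock suites and returns their metadata
        cases = test_testsuite.generate_cases_metadata()
        self.assertEqual(3, len(cases))
        for case in cases:
            # every registered case carries an environment requirement
            self.assertIsNotNone(case.requirement)
            assert case.requirement.environment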

test_lisa_runner.py

Source: test_lisa_runner.py (GitHub)



...
        # predefined env can run case in below condition.
        # 1. with predefined env of 1 simple node, so ut2 don't need a new env
        # 2. ut3 need 8 cores, and predefined env target to meet all core requirement,
        # so it can run any case with core requirements.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_fit_a_bigger_env(self) -> None:
        # similar with test_fit_a_predefined_env, but predefined 2 nodes,
        # it doesn't equal to any case req, but reusable for all cases.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_customized(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but all case want to run on a new env
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, case_use_new_env=True)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "", ""],
            expected_status=[TestStatus.PASSED, TestStatus.SKIPPED, TestStatus.SKIPPED],
            expected_message=["", self.__skipped_no_env, self.__skipped_no_env],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_generated(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but all case want to run on a new env
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, case_use_new_env=True, times=2)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deleted_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=[
                "mock_ut1",
                "mock_ut1",
                "mock_ut2",
                "mock_ut2",
                "mock_ut3",
                "mock_ut3",
            ],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_status=[
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
            ],
            expected_message=["", "", "", "", "", ""],
            test_results=test_results,
        )

    def test_no_needed_env(self) -> None:
        # two 1 node env predefined, but only customized_0 go to deploy
        # no cases assigned to customized_1, as fit cases run on customized_0 already
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "customized_0",
                "customized_1",
            ],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_deploy_no_more_resource(self) -> None:
        # platform may see no more resource, like no azure quota.
        # cases skipped due to this.
        # In future, will add retry on wait more resource.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.wait_more_resource_error = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=[],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        no_awaitable_resource_message = "deployment skipped: awaitable resource"
        no_more_resource_message = "no available environment"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", ""],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
            ],
            expected_message=[
                no_more_resource_message,
                no_awaitable_resource_message,
                no_more_resource_message,
            ],
            test_results=test_results,
        )

    def test_skipped_on_suite_failure(self) -> None:
        # First two tests were skipped because the setup is made to fail.
        test_testsuite.fail_on_before_suite = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        before_suite_failed = "before_suite: failed"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.PASSED,
            ],
            expected_message=[before_suite_failed, before_suite_failed, ""],
            test_results=test_results,
        )

    def test_env_failed_not_prepared_env(self) -> None:
        # test env not prepared, so test cases cannot find an env to run
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        test_testsuite.generate_cases_metadata()
        runner = generate_runner(None, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[],
            expected_deleted_envs=[],
            runner=runner,
        )
        no_available_env = (
            "deployment failed. LisaException: no capability found for environment: "
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_status=[
                TestStatus.FAILED,
                TestStatus.FAILED,
                TestStatus.FAILED,
            ],
            expected_message=[
                no_available_env,
                no_available_env,
                no_available_env,
            ],
            test_results=test_results,
        )

    def test_env_failed_more_failed_env_on_prepare(self) -> None:
        # test env not prepared, so test cases cannot find an env to run
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        with self.assertRaises(LisaException) as cm:
            _ = self._run_all_tests(runner)
        self.assertIn(
            "There are no remaining test results to run, ",
            str(cm.exception),
        )

    def test_env_deploy_failed(self) -> None:
        # env prepared, but deployment failed, so cases failed
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.deployed_status = EnvironmentStatus.Prepared
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
...
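
Every test above follows the same shape. The condensed, annotated restatement below is only a sketch of that pattern; generate_env_runbook, generate_runner, _run_all_tests, and the verify_* calls are helpers defined alongside these unit tests, not public LISA APIs.

    def test_pattern_sketch(self) -> None:
        # 1. populate the global case registry with the three mock cases
        test_testsuite.generate_cases_metadata()
        # 2. describe the predefined environment(s) for this run
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        # 3. build a runner from the runbook and execute all selected cases
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        # 4. assert on the environment lifecycle and the per-case outcomes
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )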


test_testsuite.py

Source: test_testsuite.py (GitHub)



...
def cleanup_cases_metadata() -> None:
    get_cases_metadata().clear()
    get_suites_metadata().clear()


def generate_cases_metadata() -> List[TestCaseMetadata]:
    ut_cases = [
        TestCaseMetadata(
            "ut1",
            0,
            requirement=simple_requirement(min_count=2),
        ),
        TestCaseMetadata("ut2", 1),
        TestCaseMetadata("ut3", 2),
    ]
    suite_metadata1 = TestSuiteMetadata("a1", "c1", "des1", ["t1", "t2"])
    suite_metadata1(MockTestSuite)
    ut_cases[0](MockTestSuite.mock_ut1)
    ut_cases[1](MockTestSuite.mock_ut2)
    suite_metadata2 = TestSuiteMetadata(
        "a2",
        "c2",
        "des2",
        ["t2", "t3"],
        requirement=node_requirement(node=schema.NodeSpace(core_count=8)),
    )
    suite_metadata2(MockTestSuite2)
    ut_cases[2](MockTestSuite2.mock_ut3)
    return ut_cases


def generate_cases_result() -> List[TestResult]:
    case_metadata = generate_cases_metadata()
    case_results = [TestResult("0", TestCaseRuntimeData(x)) for x in case_metadata]
    return case_results


def select_and_check(
    ut: TestCase, case_runbook: List[Any], expected_descriptions: List[str]
) -> List[TestCaseRuntimeData]:
    runbook = RunbookBuilder._validate_and_load({constants.TESTCASE: case_runbook})
    case_metadata = generate_cases_metadata()
    runbook.testcase = parse_testcase_filters(runbook.testcase_raw)
    filters = cast(List[schema.TestCase], runbook.testcase)
    selected = select_testcases(filters, case_metadata)
    ut.assertListEqual(expected_descriptions, [case.description for case in selected])
    return selected


class TestSuiteTestCase(TestCase):
    def generate_suite_instance(self) -> MockTestSuite:
        case_results = generate_cases_result()
        self.case_results = case_results[:2]
        suite_metadata = case_results[0].runtime_data.metadata.suite
        runbook = generate_runbook(is_single_env=True, local=True, remote=True)
        envs = load_environments(runbook)
        self.default_env = list(envs.values())[0]
        assert self.default_env
        test_suite = MockTestSuite(
            metadata=suite_metadata,
        )
        return test_suite

    def setUp(self) -> None:
        cleanup_cases_metadata()

    def test_expanded_nodespace(self) -> None:
        cases = generate_cases_metadata()
        for case in cases:
            self.assertIsNotNone(case.requirement)
            assert case.requirement.environment
            for node in case.requirement.environment.nodes:
                self.assertEqual(
                    1, node.node_count, "node count should be expanded to 1"
                )

    def test_case_override_suite(self) -> None:
        cases = generate_cases_metadata()
        case1_found = False
        case2_found = False
        for case in cases:
            assert case.requirement.environment
            assert case.suite.requirement.environment
            if case.name == "mock_ut1":
                self.assertEqual(2, len(case.requirement.environment.nodes))
                self.assertEqual(1, len(case.suite.requirement.environment.nodes))
                case1_found = True
            if case.name == "mock_ut2":
                self.assertEqual(1, len(case.requirement.environment.nodes))
                self.assertEqual(1, len(case.suite.requirement.environment.nodes))
                case2_found = True
        self.assertEqual(True, case1_found)
...
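
The helper also shows the registration mechanics: TestSuiteMetadata and TestCaseMetadata objects are applied to a suite class and its methods (here by calling them directly; in regular LISA test code they are used as decorators). The sketch below does the same for your own suite; the suite class, case method, and metadata strings are hypothetical, and the from lisa import is assumed based on LISA's documented examples.

from typing import List

from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata, simple_requirement


class MyDemoSuite(TestSuite):  # hypothetical suite, standing in for MockTestSuite
    def demo_case(self) -> None:
        pass


def register_demo_metadata() -> List[TestCaseMetadata]:
    # description and priority are positional, mirroring TestCaseMetadata("ut1", 0, ...)
    case = TestCaseMetadata(
        "demo case",
        1,
        requirement=simple_requirement(min_count=2),
    )
    suite = TestSuiteMetadata("demo_area", "demo_category", "demo suite", ["demo"])
    suite(MyDemoSuite)  # bind the suite class to its metadata first
    case(MyDemoSuite.demo_case)  # then bind the case method to its metadata
    return [case]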


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lisa automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

