How to use the check_test_results method in lisa

Best Python code snippets using lisa_python

run_backend_tests_test.py

Source: run_backend_tests_test.py (GitHub)


...
        expected_error_msg = (
            'Command \'%s\' returned non-zero exit status 1.' % test_cmd)
        with self.assertRaisesRegex(
            subprocess.CalledProcessError, expected_error_msg):
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, False)

    def test_empty_test_files_show_no_tests_were_run(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task1 = MockTask()
        task1.exception = Exception('No tests were run.')
        tasks = [task1]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, False)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, False)
        self.assertIn(
            'ERROR %s: No tests found.' % test_target, self.print_arr)

    def test_failed_test_suite_throws_error(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task1 = MockTask()
        task1.exception = Exception(
            'Test suite failed: 6 tests run, 0 errors, '
            '2 failures')
        tasks = [task1]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, False)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, False)
        self.assertIn(
            'FAILED %s: %s errors, %s failures' % (test_target, 0, 2),
            self.print_arr)

    def test_tests_failed_due_to_internal_error(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task1 = MockTask()
        task1.exception = Exception('Some internal error.')
        tasks = [task1]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, False)
        with self.print_swap, self.assertRaisesRegex(
            Exception, 'Some internal error.'
        ):
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, False)
        self.assertIn(
            ' WARNING: FAILED TO RUN %s' % test_target, self.print_arr)
        self.assertIn(
            ' This is most likely due to an import error.', self.print_arr)

    def test_unfinished_tests_are_cancelled(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task.finished = False
        task_output = ['Ran 9 tests in 1.244s', '98']
        task_result = concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output)
        task.task_results.append(task_result)
        tasks = [task]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, False)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, True)
        self.assertIn('CANCELED %s' % test_target, self.print_arr)

    def test_incomplete_coverage_is_displayed_correctly(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task_output = ['Ran 9 tests in 1.244s', '98']
        task_result = concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output)
        task.task_results.append(task_result)
        tasks = [task]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, True)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, True)
        self.assertIn(
            'INCOMPLETE COVERAGE (98%%): %s' % test_target, self.print_arr)

    def test_successfull_test_run_message_is_printed_correctly(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task_output = ['Ran 9 tests in 1.234s', '100']
        task_result = concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output)
        task.task_results.append(task_result)
        tasks = [task]
        task_to_taskspec = {}
        test_target = 'scripts.new_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, True)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, False)
        self.assertIn(
            'SUCCESS %s: 9 tests (1.2 secs)' % test_target,
            self.print_arr)

    def test_incomplete_coverage_in_excluded_files_is_ignored(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task_output = ['Ran 9 tests in 1.234s', '98']
        task_result = concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output)
        task.task_results.append(task_result)
        tasks = [task]
        task_to_taskspec = {}
        test_target = 'scripts.new_script_test'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, True)
        swap_load_excluded_files = self.swap_with_checks(
            run_backend_tests, 'load_coverage_exclusion_list',
            lambda _: ['scripts.new_script_test'],
            expected_args=((COVERAGE_EXCLUSION_LIST_PATH,),))
        with self.print_swap, swap_load_excluded_files:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, True)
        self.assertNotIn(
            'INCOMPLETE COVERAGE (98%%): %s' % test_target, self.print_arr)
        self.assertIn(
            'SUCCESS %s: 9 tests (1.2 secs)' % test_target,
            self.print_arr)

    def test_test_failed_due_to_error_in_parsing_coverage_report(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task_output = ['XYZ', '100']
        task_result = concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output)
        task.task_results = [task_result]
        tasks = [task]
        task_to_taskspec = {}
        test_target = 'scripts.random_script.py'
        task_to_taskspec[tasks[0]] = run_backend_tests.TestingTaskSpec( # type: ignore[no-untyped-call]
            test_target, True)
        with self.print_swap:
            run_backend_tests.check_test_results( # type: ignore[no-untyped-call]
                tasks, task_to_taskspec, True)
        self.assertIn(
            'An unexpected error occurred. '
            'Task output:\nXYZ',
            self.print_arr)

    def test_invalid_directory_in_sys_path_throws_error(self) -> None:
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        def mock_path_exists(dirname: str) -> bool:
            for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
                if os.path.dirname(directory) == dirname:
                    return False
            return True
        swap_path_exists = self.swap(os.path, 'exists', mock_path_exists)
...
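Taken together, these tests show the calling convention: check_test_results receives a list of (mock) tasks, a dict mapping each task to a run_backend_tests.TestingTaskSpec, and a boolean flag, and it prints one status line per test target (SUCCESS, FAILED, ERROR, CANCELED or INCOMPLETE COVERAGE). The condensed sketch below is a hypothetical extra test in the same class, not part of the original file; it assumes the MockTask helper, self.print_swap, self.print_arr and self.swap_install_third_party_libs defined elsewhere in this module, and simply mirrors the SUCCESS case exercised above.

    def test_success_line_is_printed_sketch(self) -> None:
        # Hypothetical condensed example; mirrors the SUCCESS case above.
        with self.swap_install_third_party_libs:
            from scripts import run_backend_tests
        task = MockTask()
        task_output = ['Ran 9 tests in 1.234s', '100']
        task.task_results.append(concurrent_task_utils.TaskResult(
            'task1', False, task_output, task_output))
        task_to_taskspec = {
            task: run_backend_tests.TestingTaskSpec(
                'scripts.new_script.py', True)
        }
        with self.print_swap:
            # Same arguments as the successful-run test above.
            run_backend_tests.check_test_results(
                [task], task_to_taskspec, False)
        # The captured output should contain the per-target status line.
        self.assertIn(
            'SUCCESS scripts.new_script.py: 9 tests (1.2 secs)',
            self.print_arr)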


classes.py

Source: classes.py (GitHub)


...
        for i in range(0, self.num_bands):
            self.tracker.append([])
        for i in range(0, 3*self.n_tests):
            self.pick_any_random_arm()
        return self.check_test_results()

    def pick_any_random_arm(self):
        arm_index = [i for i in range(len(self.row))]
        pa = random.choice(arm_index)
        arm_result = pull_arm(self.row, pa)
        self.tracker[pa].append(arm_result)

    def check_test_results(self):
        test_results = []
        for i in range(0, self.tracker.__len__()):
            if sum(bincount(self.tracker[i])) == 0:
                # print("No tests performed on arm #", i)
                test_results.append(-1)
            elif unique(self.tracker[i]).__len__() == 1:
                # print("Insufficient statistics to estimate odds on arm #", i)
                if self.tracker[i].__len__() > 3:
                    test_results.append(sum(self.tracker[i]) / self.tracker[i].__len__())
                else:
                    test_results.append(-2)
            else:
                test_results.append(bincount(self.tracker[i])[1] / sum(bincount(self.tracker[i])))
        return test_results

    def build_states_prob_lead(self):
        self.tracker = []
        print("The status of tracker in prob_lead before instantiation: ", self.tracker.__len__())
        for i in range(0, self.num_bands):
            self.tracker.append([])
        print("The status of tracker in prob_lead after instantiation: ", self.tracker.__len__())
        for n in range(0, self.n_tests):
            track_state_1, track_state_2 = self.track_state()
            # print(track_state_1)
            if all(st == 0 for st in track_state_2):
                self.pick_any_random_arm()
            elif any(st != 0 for st in track_state_2):
                if (self.bias_check_1 % 2) == 0:
                    self.set_of_arms_pull("prob_lead", "known", track_state_1)
                else:
                    self.set_of_arms_pull("prob_lead", "unknown", track_state_1)
        return self.check_test_results()

    def set_of_arms_pull(self, decision_type, k_or_u, track_state):
        known_arms = [i for i in range(len(track_state)) if track_state[i] > 0]  # Identify index for known
        unknown_arms = [i for i in range(len(track_state)) if track_state[i] == 0]  # Identify index for unknown
        # This 'random' flag is redundant for normal operation but useful for
        # debugging this function
        if decision_type == "random":
            if k_or_u == "unknown" and unknown_arms.__len__() != 0:
                choose_arm = random.choice(unknown_arms)
            else:
                choose_arm = random.choice(known_arms)
            arm_result = pull_arm(self.row, choose_arm)
            self.tracker[choose_arm].append(arm_result)
            self.bias_check_1 += 1
            return self.bias_check_1
        elif decision_type == "prob_lead":
            if k_or_u == "unknown" and unknown_arms.__len__() != 0:
                for i in range(3):  # FIXME: This isn't working as expected; none of the self.trackers are pulling 3 arms
                    choose_arm = random.choice(unknown_arms)
                    arm_result = pull_arm(self.row, choose_arm)
                    self.tracker[choose_arm].append(arm_result)
            else:
                ongoing_test_results = self.check_test_results()
                well_tracked_arms = [i for i in range(len(ongoing_test_results)) if ongoing_test_results[i] > 0.0]
                if (self.bias_check_2 % 2) == 0:
                    if well_tracked_arms.__len__() >= 3:  # TODO: "The 3 or more experiments could be a variable we play with"
                        for i in range(3):
                            choose_arm = ongoing_test_results.index(sorted([ongoing_test_results[i] for i in well_tracked_arms], reverse=True)[i])
                            arm_result = pull_arm(self.row, choose_arm)
                            self.tracker[choose_arm].append(arm_result)
                        # TODO: I also need to set the "batch_pulls = True" self-flag and make sure it doesn't mess up with the unknown-known-wellknown cycle of arm-pulls. This cycle is probably a critical piece of optimization.
                        # TODO: Start tracking some sort of metric to compare this new selection process vs. random sampling
                    else:
                        for i in range(3):
                            choose_arm = random.choice(known_arms)
                            arm_result = pull_arm(self.row, choose_arm)
                            self.tracker[choose_arm].append(arm_result)
...
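In this snippet, check_test_results turns the raw pull history of each bandit arm into an estimated success rate, using the sentinels -1 (arm never pulled) and -2 (only one outcome seen and too few pulls to estimate the odds). The standalone sketch below reproduces that per-arm estimate outside the class; the function name estimate_arm_odds and the sample data are hypothetical, added only for illustration.

def estimate_arm_odds(tracker):
    test_results = []
    for pulls in tracker:                      # one list of 0/1 outcomes per arm
        if len(pulls) == 0:
            test_results.append(-1)            # no tests performed on this arm
        elif len(set(pulls)) == 1:
            # Only a single outcome observed so far: use the raw mean once
            # more than three pulls have accumulated, otherwise flag it.
            if len(pulls) > 3:
                test_results.append(sum(pulls) / len(pulls))
            else:
                test_results.append(-2)
        else:
            test_results.append(pulls.count(1) / len(pulls))   # empirical success rate
    return test_results

print(estimate_arm_odds([[], [1, 1, 1, 1], [0, 1, 1, 0]]))     # -> [-1, 1.0, 0.5]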


test_break_test.py

Source: test_break_test.py (GitHub)


...
    assert(testresults['mean']['h'] == 1)
    assert(error_dict['error_code_test'] == 0)
    assert(checkstats['n0'] == 6)
    assert(checkstats['n1'] == 6)
    assert(test.check_test_results()[1] == 'mean')
    assert(test.check_test_results()[0] == True)

def test_var_break():
    '''Test var break detection'''
    df, breaktime, timeframe = create_artificial_test_data('var')
    test = TsRelBreakTest(candidate=df.candidate,
                          reference=df.reference,
                          breaktime=breaktime, test_resample=None,
                          test_check_min_data=3, test_check_spearR_sig=(-1, 1),
                          bias_corr_method=None, alpha=0.01)
    test.run_tests()
    testresults, error_dict, checkstats = test.get_results()
    assert(testresults['var']['h'] == 1)
    assert(error_dict['error_code_test'] == 0)
    assert(checkstats['n0'] == 182)
    assert(checkstats['n1'] == 184)
    nptest.assert_almost_equal(checkstats['frame_spearmanR'], 0.8944298, 4)
    nptest.assert_almost_equal(checkstats['frame_corrPval'], 0, 4)
    assert(test.check_test_results()[1] == 'var')
    assert(test.check_test_results()[0] == True)

def test_merge_results():
    '''Test function for merged results dict'''
    df = read_test_data(431790)
    breaktime = datetime(2000, 7, 1)
    test = TsRelBreakTest(candidate=df['CCI_41_COMBINED'],
                          reference=df['merra2'],
                          breaktime=breaktime, test_resample=('M', 0.3),
                          test_check_min_data=3, test_check_spearR_sig=(0, 0.1),
                          bias_corr_method='linreg', alpha=0.01)
    test.run_tests()
    results_flat = test.get_flat_results()
    testresults, error_dict, checkstats = test.get_results()
    assert(dict_depth(results_flat) == 1)
    assert(dict_depth(testresults) == 3)
...
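Distilled from the assertions above: after TsRelBreakTest.run_tests(), check_test_results() returns a sequence whose first element appears to be a flag for whether a break was detected and whose second element names the test that flagged it ('mean' or 'var'). The sketch below follows that pattern; the test name, the 'mean' argument to create_artificial_test_data and the keyword arguments are inferred from the snippet rather than copied from the original file, and it assumes the same module-level imports as the snippet (TsRelBreakTest and create_artificial_test_data are not shown there), so treat it as illustrative rather than exact.

def test_mean_break_sketch():
    '''Hedged sketch of the pattern above: run the tests, then read
    check_test_results(). Assumes the snippet's own module imports.'''
    df, breaktime, timeframe = create_artificial_test_data('mean')
    test = TsRelBreakTest(candidate=df.candidate,
                          reference=df.reference,
                          breaktime=breaktime, test_resample=None,
                          test_check_min_data=3, test_check_spearR_sig=(-1, 1),
                          bias_corr_method=None, alpha=0.01)
    test.run_tests()
    results = test.check_test_results()
    # Index 0 looks like a "break detected" flag and index 1 the name of the
    # detecting test, judging from the assertions in the snippet.
    assert(results[0] == True)
    assert(results[1] == 'mean')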


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
