How to use the get_tasks method in autotest

Best Python code snippets using autotest_python
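
Across the snippets collected below, which come from several different projects, get_tasks is simply an accessor that returns the current collection of task objects or task names; callers iterate over it to read state or to build per-task bookkeeping. As a minimal sketch of that common pattern (the TaskGroup and Task names here are hypothetical illustrations, not part of the autotest API or of any of the quoted projects):

from dataclasses import dataclass, field
from typing import List

@dataclass
class Task:
    # Hypothetical stand-in for a task object; real projects below use
    # their own model/workflow/job task classes.
    name: str
    state: str = "SUBMITTED"

@dataclass
class TaskGroup:
    tasks: List[Task] = field(default_factory=list)

    def get_tasks(self) -> List[Task]:
        # Return the task objects currently tracked by this group.
        return self.tasks

group = TaskGroup(tasks=[Task("MI"), Task("reconstruction")])

# Typical usage seen in the examples: iterate the returned tasks and
# key a per-task dictionary of results by task name.
losses = {task.name: [] for task in group.get_tasks()}
print(losses)  # {'MI': [], 'reconstruction': []}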

MTL_training.py

Source: MTL_training.py (GitHub)


...
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)
nepochs = 100  # @param{type:"integer"}
train_losses = {}
for key in model.get_tasks():
    train_losses[key] = []
train_losses["all"] = []
validation_losses = {}
for key in model.get_tasks():
    validation_losses[key] = []
validation_losses["all"] = []
test_losses = {}
for key in model.get_tasks():
    test_losses[key] = []
test_losses["all"] = []
weights = {}
for key in model.get_tasks():
    weights[key] = []
cosine_similarities = {}
std_cosines = {}
for key in model.get_tasks():
    cosine_similarities[key] = []
    cosine_similarities[key].append(0)
    std_cosines[key] = []
    std_cosines[key].append(0)
# initialize necessary metrics objects
train_meters = {}
for key in model.get_tasks():
    train_meters[key] = AverageMeter()
train_meters["all"] = AverageMeter()
validation_meters = {}
for key in model.get_tasks():
    validation_meters[key] = AverageMeter()
validation_meters["all"] = AverageMeter()
test_meters = {}
for key in model.get_tasks():
    test_meters[key] = AverageMeter()
test_meters["all"] = AverageMeter()

# function to reset metrics
def reset_metrics():
    for key in train_meters.keys():
        train_meters[key].reset()
    for key in validation_meters.keys():
        validation_meters[key].reset()

def evaluate_model(data="train"):
    mean_loss = {}
    if data == "train":
        loader = trainLoader
        for key in train_meters.keys():
            mean_loss[key] = train_meters[key]
    elif data == "test":
        loader = testLoader
        for key in test_meters.keys():
            mean_loss[key] = test_meters[key]
    elif data == "validation":
        loader = validationLoader
        for key in validation_meters.keys():
            mean_loss[key] = validation_meters[key]
    sys.stdout.write(f"Evaluation of {data} data:\n")
    # iteration over the dataset
    for x_input, y_target in Bar(loader):
        x_input, y_target = x_input.to(device=device), y_target.to(device=device)  # move to GPU
        output = model.eval()(x_input.float())
        current_weights = evaluate_criterion.get_weights()
        losses = evaluate_criterion(output, y_target)
        losses = {key: losses[ind] for ind, key in enumerate(model.get_tasks())}
        loss = sum([current_weights[key] * losses[key] for key in model.get_tasks()]) + evaluate_criterion.get_regularization()
        # update metrics
        mean_loss["all"](loss.item(), len(y_target))
        for key in model.get_tasks():
            mean_loss[key](losses[key].item(), len(y_target))

for epoch in torch.arange(0, nepochs + 1):
    start = time.time()  # start to time
    reset_metrics()  # reset the metrics from the previous epoch
    sys.stdout.write(f"\n\nEpoch {epoch}/{nepochs}\n")
    if epoch == 0:
        evaluate_model(data="train")  # first pass through the network
    else:
        sys.stdout.write(f"Training:\n")

        count = 0

        current_cosines = {}
        for key in model.get_tasks():
            current_cosines[key] = []
        for x_input, y_target in Bar(trainLoader):
            x_input, y_target = x_input.to(device=device), y_target.to(device=device)  # move to GPU
            optimizer.zero_grad()  # Zero the gradient buffers
            output = model.train()(x_input.float())  # compute the output

            if epoch >= 1 and weighting_strategy == "WeightedDynamicalAverage":
                loss_criterion.update_weights()

            current_weights = loss_criterion.get_weights()
            losses = loss_criterion(output, y_target)  # get single losses
            losses = {key: losses[ind] for ind, key in enumerate(model.get_tasks())}

            count += 1
            gradients = {}
            for key in model.get_tasks():
                gradients[key] = compute_weights_gradients(model, current_weights[key], losses[key], key, weighting_strategy)

            set_common_gradients(model, current_weights, gradients, weighting_strategy, current_cosines)
            optimizer.step()  # update weights with cumulative gradients of chosen tasks

            loss = sum([current_weights[key] * losses[key] for key in model.get_tasks()]) + loss_criterion.get_regularization()
            train_meters["all"](loss.item(), len(y_target))
            for key in model.get_tasks():
                train_meters[key](losses[key].item(), len(y_target))
            if weighting_strategy == "OL_AUX" and count % N == 0:
                loss_criterion.update_weights(compute_delta_w_i(loss_criterion.get_gradients(),
                    np.sum(current_cosines["MI"]), {k: np.sum(cosine_similarities[k]) for k in set(list(current_cosines.keys())) - {"MI"}},
                    lr, momentum=0.0))  # update weights
        scheduler.step()
        for key in model.get_tasks():
            cosine_similarities[key].append(np.mean(current_cosines[key]))
            std_cosines[key].append(np.std(current_cosines[key], ddof=1) / np.sqrt(len(current_cosines[key])))
    # evaluate the model on the validation set and print statistics for the current epoch
    evaluate_model(data="validation")
    train_loss = train_meters["all"].result()
    validation_loss = validation_meters["all"].result()
    train_results_string = f"\n Finished epoch {epoch}/{nepochs}: Train Loss {train_loss} | Validation Loss {validation_loss} \n"
    sys.stdout.write(train_results_string)

    specific_tasks_train_string = f"\n Finished epoch {epoch}/{nepochs}: "
    for key in model.get_tasks():
        train_loss = train_meters[key].result()
        specific_tasks_train_string += f" {key} Train Loss {train_loss} |"
    specific_tasks_train_string += "\n"
    sys.stdout.write(specific_tasks_train_string)

    specific_tasks_validation_string = f"\n Finished epoch {epoch}/{nepochs}: "
    for key in model.get_tasks():
        validation_loss = validation_meters[key].result()
        specific_tasks_validation_string += f" {key} Validation Loss {validation_loss} |"
    specific_tasks_validation_string += "\n"
    sys.stdout.write(specific_tasks_validation_string)

    specific_tasks_weights_string = f"\n Finished epoch {epoch}/{nepochs}: "
    for key in model.get_tasks():
        weight = loss_criterion.get_weights()[key].cpu()
        specific_tasks_weights_string += f" {key} weight {weight} | "
    specific_tasks_weights_string += "\n"
    sys.stdout.write(specific_tasks_weights_string)
    # collect training statistics of the current epoch
    for key in train_meters.keys():
        train_losses[key].append(train_meters[key].result())
    for key in validation_meters.keys():
        validation_losses[key].append(validation_meters[key].result())
    for key in loss_criterion.get_weights().keys():
        weights[key].append(loss_criterion.get_weights()[key].cpu().item())
    if epoch >= 1 and weighting_strategy == "WeightedDynamicalAverage":
        for key in model.get_tasks():
            eval("loss_criterion.compute_" + str(key) + "_lambda")(train_losses[key][-1], train_losses[key][-2])

stats = {
    "train_losses": train_losses,
    "validation_losses": validation_losses,
    "weights": weights
}
print(cosine_similarities)
print(std_cosines)
delim = "_"
tasks = list(map(str, tasks))
tasks = delim.join(tasks)
if weighting_strategy == "OL_AUX":
    weighting_strategy += "_"
    weighting_strategy += str(N)
...
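
In this training script, model.get_tasks() is the single source of truth for the task names: every per-task dictionary (losses, meters, weights, cosine similarities) is keyed by it, and the per-task losses returned by the criterion are matched to tasks by enumerating it in the same order. A minimal sketch of what such a model might look like (the MultiTaskModel class, its task names, and its heads are assumptions for illustration, not the original project's code):

import torch
import torch.nn as nn

class MultiTaskModel(nn.Module):
    """Hypothetical multi-task model whose get_tasks() drives the bookkeeping above."""

    def __init__(self, tasks=("MI", "reconstruction")):
        super().__init__()
        self.tasks = list(tasks)
        self.backbone = nn.Linear(16, 32)  # shared trunk
        self.heads = nn.ModuleDict({t: nn.Linear(32, 1) for t in self.tasks})

    def get_tasks(self):
        # The training loop relies on this ordering being stable, because
        # losses are zipped against it by enumeration index.
        return self.tasks

    def forward(self, x):
        h = torch.relu(self.backbone(x))
        return {t: self.heads[t](h) for t in self.tasks}

model = MultiTaskModel()
train_losses = {key: [] for key in model.get_tasks()}
train_losses["all"] = []

Keeping get_tasks() as the one canonical task list is what lets a script like this add or drop a task without touching any of the per-task bookkeeping code.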


MessageNonInterruptTest.py

Source: MessageNonInterruptTest.py (GitHub)


...
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

    def testRunThroughMessageInterruptSaveAndRestore(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.workflow.catch(MessageEventDefinition('Test Message'))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

    def testRunThroughHappy(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

    def testRunThroughMessageInterrupt(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.workflow.catch(MessageEventDefinition('Test Message'))
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

    def testRunThroughMessageInterruptOtherOrder(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.workflow.catch(MessageEventDefinition('Test Message'))
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

    def testRunThroughMessageInterruptOtherOrderSaveAndRestore(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.workflow.catch(MessageEventDefinition('Test Message'))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING)))

def suite():
    return unittest.TestLoader().loadTestsFromTestCase(MessageNonInterruptTest)

if __name__ == '__main__':
...
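
Here get_tasks comes from SpiffWorkflow's BpmnWorkflow and takes a TaskState mask, so the assertions can count how many tasks are READY, WAITING, or either; the states are bit flags and can be OR-ed together, as the closing assertion of each test shows. A rough sketch of the same counting pattern outside a test case (workflow construction is elided, and the import paths are those used by the SpiffWorkflow versions these tests target; they may differ in newer releases):

# Import paths may vary by SpiffWorkflow version.
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.task import TaskState

def count_open_tasks(workflow: BpmnWorkflow) -> dict:
    """Count tasks by state, mirroring the assertions in the tests above."""
    return {
        "ready": len(workflow.get_tasks(TaskState.READY)),
        "waiting": len(workflow.get_tasks(TaskState.WAITING)),
        # TaskState values are bit flags, so masks combine with |.
        "open": len(workflow.get_tasks(TaskState.READY | TaskState.WAITING)),
    }

# Usage (spec/subprocesses loading elided, as in the test's setUp):
# workflow = BpmnWorkflow(spec, subprocesses)
# workflow.do_engine_steps()
# print(count_open_tasks(workflow))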


test_execution_state.py

Source: test_execution_state.py (GitHub)


...
class TestExecutionState:

    def test_initial_state(self):
        job = mock_job_exec(2)
        assert job.state == ExecutionState.SUBMITTED
        for task in job.get_tasks():
            assert task.state == ExecutionState.SUBMITTED

    def test_schedule_state_transition(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        assert job.state == ExecutionState.SCHEDULED
        for task in job.get_tasks()[1:]:
            task.on_schedule()
        assert job.state == ExecutionState.SCHEDULED

    def test_dispatch_state(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        assert job.state == ExecutionState.DISPATCHED
        for task in job.get_tasks()[1:]:
            task.on_schedule()
        assert job.state == ExecutionState.DISPATCHED
        for task in job.get_tasks()[1:]:
            task.on_dispatch()
        assert job.state == ExecutionState.DISPATCHED

    def test_running_state(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_schedule()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_dispatch()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_start()
        assert job.state == ExecutionState.RUNNING

    def test_running_state_2(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        with pytest.raises(IllegalStateTransitionException):
            job.get_tasks()[0].on_start()

    def test_finish_state(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        job.get_tasks()[0].on_finish()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_schedule()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_dispatch()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_start()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_finish()
        assert job.state == ExecutionState.FINISHED

    def test_finish_state_2(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        with pytest.raises(IllegalStateTransitionException):
            job.get_tasks()[0].on_finish()

    def test_fail_state_1(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        job.get_tasks()[0].on_fail()
        assert job.state == ExecutionState.FAILED
        for task in job.get_tasks()[1:]:
            task.on_schedule()
            task.on_dispatch()
            task.on_start()
            task.on_finish()
        assert job.state == ExecutionState.FAILED

    def test_fail_state_2(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        job.get_tasks()[0].on_finish()
        assert job.state == ExecutionState.RUNNING
        for task in job.get_tasks()[1:]:
            task.on_schedule()
            task.on_dispatch()
            task.on_start()
            task.on_fail()
        assert job.state == ExecutionState.FAILED

    def test_state_reset(self):
        job = mock_job_exec(2)
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        job.get_tasks()[0].on_fail()
        assert job.state == ExecutionState.FAILED
        for task in job.get_tasks()[1:]:
            task.on_schedule()
            task.on_dispatch()
            task.on_start()
            task.on_finish()
        assert job.state == ExecutionState.FAILED
        job.get_tasks()[0].on_reset()
        assert job.state == ExecutionState.RUNNING
        job.get_tasks()[0].on_schedule()
        job.get_tasks()[0].on_dispatch()
        job.get_tasks()[0].on_start()
        assert job.state == ExecutionState.RUNNING
        job.get_tasks()[0].on_finish()
...
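
These tests use job.get_tasks() as the handle for driving each task's state machine and then check how the job aggregates those states: the job follows its most advanced task while work is in flight, becomes FINISHED only when every task finishes, and any single failure makes the whole job FAILED. A stripped-down sketch of that aggregation rule (the Job and Task classes below are illustrative reconstructions inferred from the test's assertions, not the project's actual implementation; the on_* transition methods and IllegalStateTransitionException checks are omitted):

from enum import IntEnum

class ExecutionState(IntEnum):
    SUBMITTED = 0
    SCHEDULED = 1
    DISPATCHED = 2
    RUNNING = 3
    FINISHED = 4
    FAILED = 5

class Task:
    def __init__(self):
        self.state = ExecutionState.SUBMITTED

class Job:
    def __init__(self, n_tasks):
        self._tasks = [Task() for _ in range(n_tasks)]

    def get_tasks(self):
        # The tests index and slice this list, so it must be ordered.
        return self._tasks

    @property
    def state(self):
        states = [t.state for t in self.get_tasks()]
        if ExecutionState.FAILED in states:
            return ExecutionState.FAILED          # any failure fails the job
        if all(s == ExecutionState.FINISHED for s in states):
            return ExecutionState.FINISHED        # finished only when all tasks finish
        # otherwise the job tracks its most advanced task, capped at RUNNING
        return min(max(states), ExecutionState.RUNNING)

# Usage, mirroring test_schedule_state_transition:
job = Job(2)
job.get_tasks()[0].state = ExecutionState.SCHEDULED
assert job.state == ExecutionState.SCHEDULED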


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 automation testing minutes free.

