How to use the test_l2 method in Avocado

Best Python code snippets using avocado_python
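In Avocado itself there is nothing special about the name: any instance method of an avocado.Test subclass whose name starts with "test" is collected and run as a test case, so a method called test_l2 is simply one more test alongside test_l1. The sketch below is a minimal, hypothetical example of that pattern; the file name, class name and assertions are illustrative and are not taken from the snippets on this page.

# test_l2_example.py - hypothetical sketch; only avocado.Test and the
# test_* naming convention are real Avocado behaviour here.
from avocado import Test

class L2Example(Test):

    def test_l1(self):
        """Collected because the method name starts with 'test'."""
        self.assertEqual(5 * 7, 35)

    def test_l2(self):
        """A second test method; Avocado discovers it the same way."""
        self.log.info("running test_l2")
        self.assertEqual("abc" * 5, "abcabcabcabcabc")

Assuming the sketch above, avocado run test_l2_example.py would run both methods. In the snippets below, test_l2 is not an Avocado test method at all: trainer.py uses it as a local accumulator variable, and the two pytest files use it as a module-level fixture value.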

trainer.py

Source: trainer.py (GitHub)

1"""2@author: Yong Zheng Ong3implements the training procedure4"""5from timeit import default_timer6import os7from matplotlib.pyplot import ylim8import matplotlib.pyplot as plt9import torch10import torch.nn as nn11import numpy as np12from .model import Model13from ...loss.loss import LpLoss14class Trainer():15 def __init__(self, modelConfig, dataset, prefix="default"):16 """17 the trainer object for performing training, testing, etc18 """19 # save dataset20 self.dataset = dataset21 self.prefix = prefix22 self.save_folder = "./results/iae_net_{}.pt".format(self.prefix)23 # add additional dataset related details to modelConfig24 modelConfig["num_d"] = self.dataset.num_d25 modelConfig["input_channel"] = self.dataset.input_channel26 modelConfig["output_channel"] = self.dataset.output_channel27 modelConfig["size"] = self.dataset.s28 print(modelConfig)29 # instantiate model30 self.model = Model(**modelConfig).cuda()31 print("model will be saved to.. {}".format(self.save_folder))32 def train(self, epochs, batch_size, learning_rate, gamma):33 """34 perform training of the model for n epochs35 """36 available_subs = self.dataset.available_subs37 best_results = {38 'epoch': 0,39 'train': 0,40 'test': 10000000041 }42 if self.dataset.load_type in ["default", "multi"]:43 if self.dataset.y_train is None: # already packaged into dataset class44 train_loader = torch.utils.data.DataLoader(self.dataset.x_train, batch_size=batch_size, shuffle=True)45 test_loader = torch.utils.data.DataLoader(self.dataset.x_test, batch_size=batch_size, shuffle=False)46 else:47 train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(self.dataset.x_train, self.dataset.y_train), batch_size=batch_size, shuffle=True)48 test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(self.dataset.x_test, self.dataset.y_test), batch_size=batch_size, shuffle=False)49 elif self.dataset.load_type in ["multi3"]:50 train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(*self.dataset.x_train, *self.dataset.y_train), batch_size=batch_size, shuffle=True)51 test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(*self.dataset.x_test, *self.dataset.y_test), batch_size=batch_size, shuffle=False)52 optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate, weight_decay=1e-4)53 scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=gamma, patience=20)54 myloss = LpLoss(size_average=False)55 for ep in range(epochs):56 self.model.train()57 t1 = default_timer()58 train_l2 = 0.059 if self.dataset.load_type in ["default", "multi"]:60 for x, y in train_loader:61 optimizer.zero_grad()62 if self.dataset.num_d == 1:63 if self.dataset.load_type == "default":64 x, y = x.cuda(), y.cuda()65 out = self.model(x)66 l2 = myloss(out.reshape(batch_size, -1), y.reshape(batch_size, -1))67 loss = l268 loss.backward()69 elif self.dataset.load_type == "multi":70 for i in range(len(available_subs)):71 # subsample the training data72 x_temp = x[:,::available_subs[i]].cuda()73 y_temp = y[:,::available_subs[i]].cuda()74 out = self.model(x_temp)75 loss = myloss(out.reshape(batch_size, -1), y_temp.reshape(batch_size, -1))76 loss.backward()77 if self.dataset.num_d == 2:78 if self.dataset.load_type == "multi":79 for i in range(len(available_subs)):80 # subsample the training data81 x_temp, y_temp = self.dataset.build_grid_data(x, y, i)82 x_temp = x_temp.cuda()83 y_temp = y_temp.cuda()84 out = self.model(x_temp).squeeze()85 if self.dataset.num_d == 2 and self.dataset.y_normalizer is not 
None:86 out = self.dataset.y_normalizer[i].decode(out)87 y_temp = self.dataset.y_normalizer[i].decode(y_temp)88 loss = myloss(out.reshape(batch_size, -1), y_temp.reshape(batch_size, -1))89 loss.backward()90 optimizer.step()91 train_l2 += loss.item()92 elif self.dataset.load_type in ["multi3"]:93 for data in train_loader:94 optimizer.zero_grad()95 loss = 0.096 for i in range(len(available_subs)):97 x = data[i].cuda()98 y = data[len(available_subs)+i].cuda()99 out = self.model(x)100 if self.dataset.num_d == 2 and self.dataset.y_normalizer is not None:101 out = self.dataset.y_normalizer[i].decode(out)102 y = self.dataset.y_normalizer[i].decode(y)103 loss += myloss(out.reshape(batch_size, -1), y.reshape(batch_size, -1))104 loss.backward()105 optimizer.step()106 train_l2 += loss.item()107 train_l2 /= self.dataset.ntrain108 # test the model109 self.model.eval()110 test_l2 = np.array([0.0] * len(available_subs))111 with torch.no_grad():112 if self.dataset.load_type in ["default", "multi"]:113 for x, y in test_loader:114 x, y = x.cuda(), y.cuda()115 if self.dataset.num_d == 1:116 if self.dataset.load_type in ["default", "multi"]:117 for i in range(len(available_subs)):118 # subsample the training data119 x_temp = x[:,::available_subs[i]]120 y_temp = y[:,::available_subs[i]]121 out = self.model(x_temp)122 test_l2[i] += myloss(out.reshape(batch_size, -1), y_temp.reshape(batch_size, -1)).item()123 if self.dataset.num_d == 2:124 if self.dataset.load_type in ["default", "multi"]:125 for i in range(len(available_subs)):126 # subsample the training data127 x_temp, y_temp = self.dataset.build_grid_data(x, y, i)128 out = self.model(x_temp).squeeze()129 if self.dataset.num_d == 2 and self.dataset.y_normalizer is not None:130 out = self.dataset.y_normalizer[i].decode(out)131 y_temp = self.dataset.y_normalizer[i].decode(y_temp)132 test_l2[i] += myloss(out.reshape(batch_size, -1), y_temp.reshape(batch_size, -1)).item()133 elif self.dataset.load_type in ["multi3"]:134 for data in test_loader:135 for i in range(len(available_subs)):136 x = data[i].cuda()137 y = data[len(available_subs)+i].cuda()138 out = self.model(x).squeeze()139 if self.dataset.num_d == 2 and self.dataset.y_normalizer is not None:140 out = self.dataset.y_normalizer[i].decode(out)141 y = self.dataset.y_normalizer[i].decode(y)142 test_l2[i] += myloss(out.reshape(batch_size, -1), y.reshape(batch_size, -1)).item()143 test_l2 /= self.dataset.ntest144 scheduler.step(test_l2[self.dataset.ts])145 t2 = default_timer()146 # if results improved, update best results147 if self.dataset.num_d == 1 and test_l2[self.dataset.ts] <= best_results['test']:148 print("saving improved model...")149 best_results['epoch'] = ep150 best_results['train'] = train_l2151 best_results['test'] = test_l2[self.dataset.ts]152 torch.save(self.model, self.save_folder)153 if self.dataset.num_d == 2 and test_l2[self.dataset.ts] <= best_results['test']:154 print("saving improved model...")155 best_results['epoch'] = ep156 best_results['train'] = train_l2157 best_results['test'] = test_l2[self.dataset.ts]158 torch.save(self.model, self.save_folder)159 print(ep, f'{t2-t1:.4e}', f'{train_l2:.4e}', test_l2, scheduler.optimizer.param_groups[0]['lr'])...
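In trainer.py above, test_l2 is that local accumulator: a NumPy array with one relative-L2 slot per subsampling rate, filled with LpLoss values during evaluation, divided by ntest, and then read at index self.dataset.ts to drive both ReduceLROnPlateau and checkpointing. The LpLoss class itself is not reproduced on this page, so the helper below is only an assumption about what LpLoss(size_average=False) typically computes (a batch-summed relative L2 error), shown to make the test_l2 bookkeeping concrete.

import torch

def rel_l2(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Batch-summed relative L2 error, i.e. the behaviour assumed here for
    LpLoss(size_average=False); the real ...loss.loss.LpLoss is not shown."""
    batch = pred.shape[0]
    pred_flat = pred.reshape(batch, -1)
    target_flat = target.reshape(batch, -1)
    diff = torch.linalg.norm(pred_flat - target_flat, dim=1)  # ||pred - target||_2 per sample
    ref = torch.linalg.norm(target_flat, dim=1)               # ||target||_2 per sample
    return (diff / ref).sum()

With such a loss, the evaluation loop reduces to test_l2[i] += rel_l2(out, y_temp).item() per batch and resolution, followed by test_l2 /= ntest at the end of the epoch, which is exactly the pattern visible in the code above.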

test_multipler.py

Source: test_multipler.py (GitHub)

import pytest

from sandpit_package.mathers.multiplier import Multiplier

expected_op = "*"

test_l1 = 5
test_r1 = 7
expected_result1 = test_l1 * test_r1
expected_msg1 = "%s %s %s = %s" % (test_l1, expected_op, test_r1, expected_result1)

test_l2 = "abc"
test_r2 = 5
expected_result2 = test_l2 * test_r2
expected_msg2 = "%s %s %s = %s" % (test_l2, expected_op, test_r2, expected_result2)

def test_init():
    with pytest.raises(Exception):
        x1 = Multiplier()
    with pytest.raises(Exception):
        x2 = Multiplier(test_l1)
    x3 = Multiplier(test_l1, test_r1)
    assert (x3.l, x3.r) == (test_l1, test_r1)
    x4 = Multiplier(test_l2, test_r2)
    assert (x4.l, x4.r) == (test_l2, test_r2)

def test_op_str():
    x = Multiplier(test_l1, test_r1)
    assert x._op_str() == expected_op

def test_result():
    x1 = Multiplier(test_l1, test_r1)
    assert x1.result() == expected_result1
    x1 = Multiplier(test_r1, test_l1)
    assert x1.result() == expected_result1
    x2 = Multiplier(test_l2, test_r2)
    assert x2.result() == expected_result2
    x2 = Multiplier(test_r2, test_l2)
    assert x2.result() == expected_result2

def test_msg():
    x1 = Multiplier(test_l1, test_r1)
    assert x1._msg() == expected_msg1
    x2 = Multiplier(test_l2, test_r2)
    assert x2._msg() == expected_msg2

def test_print(capsys):
    x1 = Multiplier(test_l1, test_r1)
    x1.print()
    ...
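test_multipler.py only exercises the class; the sandpit_package.mathers.multiplier.Multiplier implementation is not shown on this page. A hypothetical minimal class that would satisfy these assertions looks like the following; the attribute and method names (l, r, _op_str, result, _msg, print) are read off the tests, everything else is an assumption.

# Hypothetical Multiplier matching the tests above; not the real
# sandpit_package.mathers.multiplier source.
class Multiplier:
    def __init__(self, l, r):
        # Calling with fewer than two arguments raises TypeError,
        # which satisfies the pytest.raises(Exception) checks in test_init.
        self.l = l
        self.r = r

    def _op_str(self):
        return "*"

    def result(self):
        return self.l * self.r

    def _msg(self):
        return "%s %s %s = %s" % (self.l, self._op_str(), self.r, self.result())

    def print(self):
        print(self._msg())

Note that the second fixture pair, test_l2 = "abc" and test_r2 = 5, leans on Python's sequence repetition: "abc" * 5 and 5 * "abc" produce the same string, which is why test_result can assert equality even after swapping the operands.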

test_adder.py

Source: test_adder.py (GitHub)

import pytest

from sandpit_package.mathers.adder import Adder

expected_op = "+"

test_l1 = 5
test_r1 = 7
expected_result1 = test_l1 + test_r1
expected_msg1 = "%s %s %s = %s" % (test_l1, expected_op, test_r1, expected_result1)

test_l2 = "abc"
test_r2 = "def"
expected_result2 = test_l2 + test_r2
expected_msg2 = "%s %s %s = %s" % (test_l2, expected_op, test_r2, expected_result2)

def test_init():
    with pytest.raises(Exception):
        x1 = Adder()
    with pytest.raises(Exception):
        x2 = Adder(test_l1)
    x3 = Adder(test_l1, test_r1)
    assert (x3.l, x3.r) == (test_l1, test_r1)
    x4 = Adder(test_l2, test_r2)
    assert (x4.l, x4.r) == (test_l2, test_r2)

def test_op_str():
    x = Adder(test_l1, test_r1)
    assert x._op_str() == expected_op

def test_result():
    x1 = Adder(test_l1, test_r1)
    assert x1.result() == expected_result1
    x1 = Adder(test_r1, test_l1)
    assert x1.result() == expected_result1
    x2 = Adder(test_l2, test_r2)
    assert x2.result() == expected_result2
    x2 = Adder(test_r2, test_l2)
    assert x2.result() != expected_result2

def test_msg():
    x1 = Adder(test_l1, test_r1)
    assert x1._msg() == expected_msg1
    x2 = Adder(test_l2, test_r2)
    assert x2._msg() == expected_msg2

def test_print(capsys):
    x1 = Adder(test_l1, test_r1)
    x1.print()
    ...
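test_adder.py mirrors the multiplier tests, with test_l2 and test_r2 now both strings; the one behavioural difference is the swapped-operand case in test_result, which asserts inequality because string concatenation is not commutative. A quick check of the values behind those assertions:

# Values behind the swapped-operand assertions in test_result above.
test_l2, test_r2 = "abc", "def"
assert test_l2 + test_r2 == "abcdef"   # expected_result2
assert test_r2 + test_l2 == "defabc"   # a different string, hence result() != expected_result2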

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
