How to use the test_end_callback method in Slash

Best Python code snippets using Slash
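The snippets below are PyTorch Lightning training scripts that construct and register a custom TestEndCallback; they illustrate the "run something when testing ends" callback pattern rather than the Slash testing framework's own API. In Slash itself, a test_end handler is normally registered through the slash.hooks mechanism. The following is a minimal sketch, assuming Slash's hook registration API (slash.hooks.test_end.register) and the slash.context accessor; verify the exact names against your installed Slash version.

# slashconf.py -- minimal sketch, assuming the slash.hooks.test_end hook
# and slash.context.test (check against your Slash version)
import slash

@slash.hooks.test_end.register
def report_test_end():
    # called by Slash after each test finishes
    print('test ended:', slash.context.test)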

train_LSTM.py

Source: train_LSTM.py (GitHub)


import os
from argparse import ArgumentParser
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
import wandb
import sys
import random
import numpy as np
sys.path.append('..')
from models.LSTM import S2S_LSTM
from base_module import BaseDynamicsModule
from utilities.toolsies import seed_everything, str2bool
from utilities.callbacks import BestValidationCallback, TestEndCallback


class DynamicsLSTM(BaseDynamicsModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = S2S_LSTM(coord_dims=self.coord_dim,
                              hidden_size=self.hparams.model_hidden_size,
                              num_layers=self.hparams.model_num_layers)

    def rollout(self, batch, start, rollout_size, refeed=True):
        trajectory = batch['trajectory'].to(self.device)
        input_end_point = output_start_point = start + self.hparams.model_input_size
        input_trajectory = trajectory[:, start:input_end_point, :]
        ground_truth = trajectory[:, output_start_point:(output_start_point + rollout_size), :]
        if refeed:
            output = self.model(input_trajectory, target_len=self.hparams.model_output_size)
            while output.size(1) < rollout_size:  # keep rolling till we reach the required size
                # if the model output is smaller than the input use previous data
                if self.hparams.model_output_size < self.hparams.model_input_size:
                    keep_from_input = self.hparams.model_input_size - self.hparams.model_output_size
                    input_trajectory = torch.cat((input_trajectory[:, -keep_from_input:, :],
                                                  output[:, -self.hparams.model_output_size:, :]), dim=1)
                else:
                    input_trajectory = output[:, -self.hparams.model_input_size:, :]
                out_one = self.model(input_trajectory, target_len=self.hparams.model_output_size)
                output = torch.cat((output, out_one), dim=1)
            return output[:, :rollout_size, :], ground_truth
        else:
            output = self.model(input_trajectory, target_len=rollout_size)
            return output, ground_truth

    def forward(self, batch):
        # one forward pass with the model's default input/output sizes
        # the starting point is randomized in here
        target_len = self.hparams.model_output_size
        trajectory = batch['trajectory']
        start = self.get_start(batch, target_len)
        input_end_point = output_start_point = start + self.hparams.model_input_size
        input = trajectory[:, start:input_end_point, :]
        target = trajectory[:, output_start_point:(output_start_point + target_len), :]
        tfr = self.hparams.teacher_forcing_ratio
        # use teacher forcing
        if (tfr > 0.0) and (random.random() < tfr):
            output = self.model(input, target_len, target)
        else:  # predict recursively
            output = self.model(input, target_len, None)
        return output, target

    def training_step(self, train_batch, batch_idx):
        rec_loss = 0.0
        for i in range(self.hparams.samples_per_batch_train):
            output_trajectory, target_trajectory = self.forward(train_batch)
            rec_loss = rec_loss + self.reconstruction_loss(output_trajectory, target_trajectory)
        rec_loss = rec_loss / self.hparams.samples_per_batch_train
        self.log('train/rec', rec_loss, prog_bar=True, on_step=False, on_epoch=True)
        # Log longer losses
        with torch.no_grad():
            if (batch_idx % self.hparams.log_freq) == 0:
                self.log_rec_losses(train_batch, 'train', self.val_rec_loss_sizes)
        return rec_loss

    def training_epoch_end(self, outputs):
        if self.hparams.teacher_forcing_ratio > 0.0:
            self.hparams.teacher_forcing_ratio -= self.hparams.teacher_forcing_reduction
        else:
            self.hparams.teacher_forcing_ratio = 0.0
        self.log('train/teacher_forcing_ratio', self.hparams.teacher_forcing_ratio)

    def validation_step(self, val_batch, batch_idx):
        for i in range(self.hparams.samples_per_batch_val):
            self.log_rec_losses(val_batch, 'val', self.val_rec_loss_sizes)

    def test_step(self, test_batch, batch_idx, dataloader_idx=None):
        for i in range(self.hparams.samples_per_batch_test):
            self.log_rec_losses(test_batch, 'test', self.test_rec_loss_sizes)

    def log_rec_losses(self, batch, stage, rec_loss_sizes, on_epoch=True, on_step=False):
        # reconstruction losses for longer trajectories
        max_rollout = np.max(rec_loss_sizes)
        start = self.get_start(batch, rec_loss_sizes)
        output, target = self.rollout(batch, start=start, rollout_size=max_rollout, refeed=True)
        output = output.to(self.device)
        target = target.to(self.device)
        for step in rec_loss_sizes:
            rec_loss = self.reconstruction_loss(output[:, :step], target[:, :step])
            self.log(f'{stage}/rec/cumm/{step:04d}', rec_loss, on_step=on_step, on_epoch=on_epoch)
            rec_loss = self.reconstruction_loss(output[:, (step-1):step], target[:, (step-1):step])
            self.log(f'{stage}/rec/{step:04d}', rec_loss, on_step=on_step, on_epoch=on_epoch)
        # Also log the internal h propagation results
        output, target = self.rollout(batch, start=start, rollout_size=max_rollout, refeed=False)
        output = output.to(self.device)
        target = target.to(self.device)
        for step in rec_loss_sizes:
            rec_loss = self.reconstruction_loss(output[:, :step], target[:, :step])
            self.log(f'{stage}/rec/cumm/hprop/{step:04d}', rec_loss, on_step=on_step, on_epoch=on_epoch)
            rec_loss = self.reconstruction_loss(output[:, (step-1):step], target[:, (step-1):step])
            self.log(f'{stage}/rec/hprop/{step:04d}', rec_loss, on_step=on_step, on_epoch=on_epoch)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--project_name', default='dummy')
    parser.add_argument('--model', default='lstm')
    parser.add_argument('--dataset', default='pendulum-2')
    parser.add_argument('--dataset_dt', type=float, default=0.05)
    parser.add_argument('--coordinates', default='phase_space')
    parser.add_argument('--noise_std', type=float, default=0.0)
    parser.add_argument('--rec_loss_type', type=str, default='L1')
    parser.add_argument('--model_hidden_size', type=int, default=100)
    parser.add_argument('--model_num_layers', type=int, default=2)
    parser.add_argument('--model_input_size', type=int, default=10)
    parser.add_argument('--model_output_size', type=int, default=1)
    parser.add_argument('--teacher_forcing_ratio', type=float, default=1.0)
    parser.add_argument('--teacher_forcing_reduction', type=float, default=0.05)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--batch_size_val', type=int, default=16)
    parser.add_argument('--samples_per_batch_train', type=int, default=1)
    parser.add_argument('--samples_per_batch_val', type=int, default=1)
    parser.add_argument('--samples_per_batch_test', type=int, default=10)
    parser.add_argument('--use_random_start', type=str2bool)
    parser.add_argument('--model_dropout_pct', type=float, default=0.0)
    parser.add_argument('--scheduler_patience', type=int, default=20)
    parser.add_argument('--scheduler_factor', type=float, default=0.3)
    parser.add_argument('--scheduler_min_lr', type=float, default=1e-7)
    parser.add_argument('--scheduler_threshold', type=float, default=1e-5)
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--max_epochs', type=int, default=2000)
    parser.add_argument('--monitor', type=str, default='val/rec/0001')
    parser.add_argument('--early_stopping_patience', type=int, default=60)
    parser.add_argument('--gpus', type=int, default=0)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--use_wandb', type=str2bool, default=True)
    parser.add_argument('--log_freq', type=int, default=100)
    parser.add_argument('--fast_dev_run', type=str2bool, default=False)
    parser.add_argument('--debug', type=str2bool, default=False)
    parser.add_argument('--progress_bar_refresh_rate', type=int, default=100)
    hparams = parser.parse_args()
    print(hparams)

    seed_everything(hparams.seed)
    pl.seed_everything(hparams.seed)
    model = DynamicsLSTM(**vars(hparams))
    print(model)
    if hparams.use_wandb:
        save_dir = os.path.join(os.environ['WANDB_DIR'], hparams.project_name)
        os.makedirs(save_dir, exist_ok=True)
        logger = pl.loggers.WandbLogger(project=hparams.project_name, save_dir=save_dir)
        logger.log_hyperparams(vars(hparams))
        if hparams.debug:
            logger.watch(model)
        checkpoint_dir = os.path.join(logger.experiment.dir, 'checkpoints/')
    else:
        # log_dir = os.path.join(os.environ['EXP_DIR'], 'tensorboard')
        log_dir = '~/tensorboard/'
        print(f'Using tensorboard from {log_dir}')
        os.makedirs(os.path.join(log_dir, hparams.project_name), exist_ok=True)
        experiment_name = f'in_{hparams.model_input_size}_out{hparams.model_output_size}'
        logger = pl.loggers.TensorBoardLogger(save_dir=log_dir, name=experiment_name)
        checkpoint_dir = logger.log_dir
    os.makedirs(checkpoint_dir, exist_ok=True)
    print(f'Checkpoint dir {checkpoint_dir}')
    lr_monitor_callback = pl.callbacks.LearningRateMonitor()
    early_stop_callback = pl.callbacks.EarlyStopping(monitor=hparams.monitor, min_delta=0.00,
                                                     patience=hparams.early_stopping_patience,
                                                     verbose=True, mode='min')
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename='{epoch}',
        monitor=hparams.monitor,
        save_top_k=1, verbose=True, mode='min',
        save_last=False)
    best_validation_callback = BestValidationCallback(hparams.monitor, hparams.use_wandb)
    test_end_callback = TestEndCallback(hparams.use_wandb)

    trainer = pl.Trainer.from_argparse_args(hparams, logger=logger,
                                            log_every_n_steps=1,
                                            callbacks=[checkpoint_callback,
                                                       early_stop_callback,
                                                       lr_monitor_callback,
                                                       best_validation_callback,
                                                       test_end_callback
                                                       ],
                                            deterministic=True,
                                            progress_bar_refresh_rate=hparams.progress_bar_refresh_rate
                                            )
    trainer.fit(model)
    if not hparams.fast_dev_run:
        ...
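train_LSTM.py imports BestValidationCallback and TestEndCallback from utilities.callbacks, whose definitions are not shown on this page (the digitTrain.py snippet further down defines similar callbacks inline). As a rough sketch, a callback constructed as TestEndCallback(hparams.use_wandb) might look like the following; the body is an assumption based on how the callback is used in these scripts, not the actual utilities.callbacks implementation.

# Sketch of a PyTorch Lightning callback in the spirit of the imported
# TestEndCallback; the use_wandb flag mirrors TestEndCallback(hparams.use_wandb)
import pytorch_lightning as pl
import wandb


class TestEndCallback(pl.Callback):
    def __init__(self, use_wandb):
        super().__init__()
        self.use_wandb = use_wandb

    def on_test_end(self, trainer, pl_module):
        # gather everything the test loop logged via self.log(...)
        metrics = {k: float(v) for k, v in trainer.callback_metrics.items()}
        print('test metrics:', metrics)
        if self.use_wandb:
            # push the final test metrics into the wandb run summary
            wandb.run.summary.update(metrics)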


train_MLP.py

Source: train_MLP.py (GitHub)


import os
from argparse import ArgumentParser
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
import wandb
import sys
import random
sys.path.append('..')
from models.MLP import MLP
from base_module import BaseDynamicsModule
from utilities.toolsies import seed_everything, str2bool
from utilities.callbacks import BestValidationCallback, TestEndCallback


class DynamicsMLP(BaseDynamicsModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = MLP(input_size=self.hparams.model_input_size,
                         output_size=self.hparams.model_output_size,
                         model_size=self.hparams.model_hidden_size,
                         latent_size=self.hparams.model_latent_size,
                         nonlinearity=self.hparams.model_nonlinearity,
                         coord_dim=self.coord_dim,
                         use_layer_norm=self.hparams.use_layer_norm)

        if self.hparams.use_supervision and self.hparams.sup_loss_type == 'sigmoid_parametrized':
            self.w1 = torch.nn.Parameter(torch.tensor(1.0))
            self.w2 = torch.nn.Parameter(torch.tensor(1.0))

    def rollout(self, batch, start, rollout_size):
        trajectory = batch['trajectory']
        input_end_point = output_start_point = start + self.hparams.model_input_size
        input_trajectory = trajectory[:, start:input_end_point, :]
        output = self.model(input_trajectory)[0]
        model_input_size = self.hparams.model_input_size
        model_output_size = self.hparams.model_output_size
        while output.size(1) < rollout_size:  # keep rolling till we reach the required size
            # if the model output is smaller than the input use previous data
            if model_output_size < model_input_size:
                keep_from_input = model_input_size - model_output_size
                input_trajectory = torch.cat((input_trajectory[:, -keep_from_input:, :],
                                              output[:, -model_output_size:, :]), dim=1)
            else:
                input_trajectory = output[:, -model_input_size:, :]
            output = torch.cat((output, self.model(input_trajectory)[0]), dim=1)
        return output[:, :rollout_size, :], trajectory[:, output_start_point:(output_start_point + rollout_size), :]

    def forward(self, batch):
        # one forward pass with the model's default input/output sizes
        # the starting point is randomized in here
        trajectory = batch['trajectory']
        start = self.get_start(batch, self.hparams.model_output_size)
        input_end_point = output_start_point = start + self.hparams.model_input_size
        input_trajectory = trajectory[:, start:input_end_point, :]
        target_trajectory = trajectory[:, output_start_point:(output_start_point +
                                       self.hparams.model_output_size), :]
        output_trajectory, latents = self.model(input_trajectory)
        return output_trajectory, target_trajectory, latents

    def get_label_loss(self, batch, latents):
        labels = batch['labels']
        label_loss = self._compute_label_loss(labels, latents)
        return label_loss

    def training_step(self, train_batch, batch_idx):
        rec_loss = 0.0
        label_loss = 0.0
        for i in range(self.hparams.samples_per_batch_train):
            output_trajectory, target_trajectory, latents = self.forward(train_batch)
            rec_loss = rec_loss + self.reconstruction_loss(output_trajectory, target_trajectory)
            if (self.hparams.use_supervision):
                label_loss = label_loss + self.get_label_loss(train_batch, latents)
        rec_loss = rec_loss / self.hparams.samples_per_batch_train
        self.log('train/rec', rec_loss, prog_bar=True, on_step=False, on_epoch=True)
        if self.hparams.use_supervision:
            label_loss = label_loss / self.hparams.samples_per_batch_train
            self.log('train/label_loss', label_loss, prog_bar=True, on_step=False, on_epoch=True)
        train_loss = rec_loss + self.hparams.sup_multiplier * label_loss
        # Log longer losses
        if (batch_idx % self.hparams.log_freq) == 0:
            self.log_rec_losses(train_batch, 'train', self.val_rec_loss_sizes,
                                on_step=False, on_epoch=True)
        return train_loss

    def validation_step(self, val_batch, batch_idx):
        for i in range(self.hparams.samples_per_batch_val):
            self.log_rec_losses(val_batch, 'val', self.val_rec_loss_sizes)
        if self.hparams.use_supervision:
            _, _, latents = self.forward(val_batch)
            label_loss = self.get_label_loss(val_batch, latents)
            self.log('val/label_loss', label_loss)

    def test_step(self, test_batch, batch_idx, dataloader_idx=None):
        for i in range(self.hparams.samples_per_batch_test):
            self.log_rec_losses(test_batch, 'test', self.test_rec_loss_sizes)
        if self.hparams.use_supervision:
            _, _, latents = self.forward(test_batch)
            label_loss = self.get_label_loss(test_batch, latents)
            self.log('val/label_loss', label_loss)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--project_name', default='dummy')
    parser.add_argument('--model', default='mlp')
    parser.add_argument('--dataset', default='var_length')
    parser.add_argument('--dataset_dt', type=float, default=0.05)
    parser.add_argument('--coordinates', default='phase_space')
    parser.add_argument('--noise_std', type=float, default=0.0)
    # L1, MSE
    parser.add_argument('--rec_loss_type', type=str, default='L1')
    parser.add_argument('--model_nonlinearity', type=str, default='relu')
    parser.add_argument('--model_hidden_size', nargs='+', type=int, default=[400, 200])
    parser.add_argument('--model_input_size', type=int, default=10)
    parser.add_argument('--model_latent_size', type=int, default=5)
    parser.add_argument('--model_output_size', type=int, default=1)
    # SUPERVISION
    # sigmoid, sigmoid_parametrized, linear, linear_scaledled, BCE
    parser.add_argument('--use_supervision', type=str2bool, default=False)
    parser.add_argument('--sup_loss_type', type=str, default=None)
    parser.add_argument('--sup_multiplier', type=float, default=None)
    parser.add_argument('--use_layer_norm', type=str2bool, default=None)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--batch_size_val', type=int, default=16)
    parser.add_argument('--samples_per_batch_train', type=int, default=1)
    parser.add_argument('--samples_per_batch_val', type=int, default=1)
    parser.add_argument('--samples_per_batch_test', type=int, default=10)
    parser.add_argument('--use_random_start', type=str2bool)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--model_dropout_pct', type=float, default=0.0)
    parser.add_argument('--scheduler_patience', type=int, default=20)
    parser.add_argument('--scheduler_factor', type=float, default=0.3)
    parser.add_argument('--scheduler_min_lr', type=float, default=1e-7)
    parser.add_argument('--scheduler_threshold', type=float, default=1e-5)
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--max_epochs', type=int, default=2000)
    parser.add_argument('--monitor', type=str, default='val/rec/0001')
    parser.add_argument('--early_stopping_patience', type=int, default=60)
    parser.add_argument('--gpus', type=int, default=0)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--use_wandb', type=str2bool, default=True)
    parser.add_argument('--log_freq', type=int, default=50)
    parser.add_argument('--fast_dev_run', type=str2bool, default=False)
    parser.add_argument('--debug', type=str2bool, default=False)
    parser.add_argument('--progress_bar_refresh_rate', type=int, default=100)
    hparams = parser.parse_args()
    print(hparams)

    seed_everything(hparams.seed)
    pl.seed_everything(hparams.seed)
    model = DynamicsMLP(**vars(hparams))
    print(model)
    if hparams.use_wandb:
        save_dir = os.path.join(os.environ['WANDB_DIR'], hparams.project_name)
        os.makedirs(save_dir, exist_ok=True)
        logger = pl.loggers.WandbLogger(project=hparams.project_name, save_dir=save_dir)
        logger.log_hyperparams(vars(hparams))
        if hparams.debug:
            logger.watch(model)
        checkpoint_dir = os.path.join(logger.experiment.dir, 'checkpoints/')
    else:
        # log_dir = os.path.join(os.environ['EXP_DIR'], 'tensorboard')
        log_dir = '~/tensorboard/'
        print(f'Using tensorboard from {log_dir}')
        os.makedirs(os.path.join(log_dir, hparams.project_name), exist_ok=True)
        experiment_name = f'in_{hparams.model_input_size}_out{hparams.model_output_size}'
        logger = pl.loggers.TensorBoardLogger(save_dir=log_dir, name=experiment_name)
        checkpoint_dir = logger.log_dir
    os.makedirs(checkpoint_dir, exist_ok=True)
    print(f'Checkpoint dir {checkpoint_dir}')
    lr_monitor_callback = pl.callbacks.LearningRateMonitor()
    early_stop_callback = pl.callbacks.EarlyStopping(monitor=hparams.monitor, min_delta=0.00,
                                                     patience=hparams.early_stopping_patience,
                                                     verbose=True, mode='min')
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename='{epoch}',
        monitor=hparams.monitor,
        save_top_k=1, verbose=True, mode='min',
        save_last=False)
    best_validation_callback = BestValidationCallback(hparams.monitor, hparams.use_wandb)
    test_end_callback = TestEndCallback(hparams.use_wandb)

    trainer = pl.Trainer.from_argparse_args(hparams, logger=logger,
                                            log_every_n_steps=1,
                                            callbacks=[checkpoint_callback,
                                                       early_stop_callback,
                                                       lr_monitor_callback,
                                                       best_validation_callback,
                                                       test_end_callback
                                                       ],
                                            deterministic=True,
                                            progress_bar_refresh_rate=hparams.progress_bar_refresh_rate
                                            )
    trainer.fit(model)
    ...
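Note that the visible portion of both training scripts ends with trainer.fit(model) (the rest is truncated). The registered test_end_callback only runs its on_test_end hook once the test loop is executed, typically right after training. A minimal usage sketch:

# Hypothetical continuation after trainer.fit(model): running the test loop
# is what eventually triggers TestEndCallback.on_test_end
trainer.test(model)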


digitTrain.py

Source: digitTrain.py (GitHub)


# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 21:37:28 2022

@author: HU
"""

import pytorch_lightning as pl
import torch
from torch import nn
import pickle
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
from argparse import ArgumentParser
import os


class MyModule(nn.Module):
    def __init__(self, act_fn, input_size=11*7*3, num_classes=11, hidden_sizes=[256, 64, 16]):
        """
        Args:
            act_fn: Object of the activation function that should be used as non-linearity in the network.
            input_size: Size of the input images in pixels
            num_classes: Number of classes we want to predict
            hidden_sizes: A list of integers specifying the hidden layer sizes in the NN
        """
        super().__init__()

        # Create the network based on the specified hidden sizes
        layers = []
        layer_sizes = [input_size] + hidden_sizes
        for layer_index in range(1, len(layer_sizes)):
            layers += [nn.Linear(layer_sizes[layer_index - 1], layer_sizes[layer_index]), act_fn]
        layers += [nn.Linear(layer_sizes[-1], num_classes)]
        # A module list registers a list of modules as submodules (e.g. for parameters)
        self.layers = nn.ModuleList(layers)

        self.config = {
            "act_fn": act_fn.__class__.__name__,
            "input_size": input_size,
            "num_classes": num_classes,
            "hidden_sizes": hidden_sizes,
        }

    def forward(self, x):
        x = x.view(x.size(0), -1)
        for layer in self.layers:
            x = layer(x)
        return x


class Identity(nn.Module):
    def forward(self, x):
        return x


class MyDataSet(Dataset):
    def __init__(self, label, img):
        self.label = label
        self.img = img

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        batch = {
            'img': self.img[:, :, :, idx].astype('float32'),
            'label': self.label[idx]
        }
        return batch


class MyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        f = open('digitLabel.pickle', "rb")
        label = pickle.load(f)
        f = open('digitData.pickle', "rb")
        img = (pickle.load(f)/255 - 0.5)*2

        trainLen = len(label)//10*8
        valLen = len(label)//10

        self.train_dataset = MyDataSet(label[:trainLen], img[:, :, :, :trainLen])
        self.val_dataset = MyDataSet(label[trainLen:trainLen+valLen],
                                     img[:, :, :, trainLen:trainLen+valLen])
        self.test_dataset = MyDataSet(label[trainLen+valLen:],
                                      img[:, :, :, trainLen+valLen:])
        self.batch_size = 32
        self.model = MyModule(nn.Sigmoid())
        self.loss = nn.CrossEntropyLoss()

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=0)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=True)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=1, shuffle=False)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), weight_decay=0.001)
        # try CosineAnnealingLR
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                               mode='min',
                                                               factor=0.7,
                                                               patience=20,
                                                               verbose=True,
                                                               min_lr=0.0001)
        return {'optimizer': optimizer,
                'lr_scheduler': scheduler,
                'monitor': 'val/rec'}

    def forward(self, batch):
        return self.model(batch['img']), batch['label']

    def training_step(self, train_batch, batch_idx):
        label = train_batch['label']
        out, target = self.forward(train_batch)
        rec_loss = self.loss(out, target)
        self.log('train/rec', rec_loss, on_step=True, on_epoch=True, prog_bar=False, logger=True)

        max_idx = torch.argmax(out, dim=1)
        acc = torch.mean((max_idx == label).type(torch.FloatTensor))
        self.log('train/acc', acc, on_step=True, on_epoch=True, prog_bar=False, logger=True)
        return rec_loss

    def validation_step(self, val_batch, batch_idx):
        label = val_batch['label']
        out, target = self.forward(val_batch)
        rec_loss = self.loss(out, target)
        self.log('val/rec', rec_loss, on_step=True, on_epoch=True, prog_bar=False, logger=True)

        max_idx = torch.argmax(out, dim=1)
        acc = torch.mean((max_idx == label).type(torch.FloatTensor))
        self.log('val/acc', acc, on_step=True, on_epoch=True, prog_bar=False, logger=True)
        return rec_loss

    def test_step(self, test_batch, batch_idx, dataloader_idx=None):
        label = test_batch['label']
        out, target = self.forward(test_batch)
        rec_loss = self.loss(out, target)

        self.log('test/rec', rec_loss, on_step=True, on_epoch=True, prog_bar=False, logger=True)
        max_idx = torch.argmax(out, dim=1)
        acc = torch.mean((max_idx == label).type(torch.FloatTensor))
        self.log('test/acc', acc, on_step=True, on_epoch=True, prog_bar=False, logger=True)


class BestValidationCallback(pl.callbacks.base.Callback):
    # logs the best validation loss and other stuff
    def __init__(self, monitor):
        super().__init__()
        self.monitor = monitor
        self.best_val_loss = np.Inf

    def on_validation_end(self, trainer, pl_module):
        if trainer.running_sanity_check:
            return
        losses = trainer.logger_connector.callback_metrics
        print('cur: ' + str(losses['val/rec']) + ' best: ' + str(self.best_val_loss))
        if (losses[self.monitor] < self.best_val_loss):
            self.best_val_loss = losses[self.monitor]


class TestEndCallback(pl.callbacks.base.Callback):
    # logs the final test metrics
    def __init__(self):
        super().__init__()

    def on_test_end(self, trainer, pl_module):
        acc = trainer.logger_connector.callback_metrics['test/acc']
        print('accuracy: ', acc)


if __name__ == '__main__':
    pl.seed_everything(0)

    model = MyModel()
    lr_monitor_callback = pl.callbacks.LearningRateMonitor()
    logger = pl.loggers.TensorBoardLogger(save_dir='./log', name='digit')
    checkpoint_dir = logger.log_dir
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename='{epoch}',
        monitor='val/rec',
        save_top_k=1, verbose=True, mode='min',
        save_last=False)
    best_validation_callback = BestValidationCallback('val/rec')
    test_end_callback = TestEndCallback()

    trainer = pl.Trainer(
        logger=logger,
        max_epochs=1000,
        callbacks=[
            checkpoint_callback,
            lr_monitor_callback,
            best_validation_callback,
            test_end_callback,
        ],
        deterministic=True,
        check_val_every_n_epoch=1
    )

    trainer.fit(model)
    ...
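digitTrain.py writes its callbacks against older PyTorch Lightning internals: it subclasses pl.callbacks.base.Callback and reads metrics through trainer.logger_connector.callback_metrics. In recent releases the same on_test_end behaviour is usually expressed through the public API, roughly as follows (a sketch mirroring the TestEndCallback defined above):

# Same on_test_end behaviour using the public API of newer
# PyTorch Lightning versions (pl.Callback, trainer.callback_metrics)
import pytorch_lightning as pl


class TestEndCallback(pl.Callback):
    def on_test_end(self, trainer, pl_module):
        # 'test/acc' is the metric logged in test_step above
        acc = trainer.callback_metrics['test/acc']
        print('accuracy: ', acc)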


