How to use test_stats_history method in locust

Best Python code snippet using locust

trainer.py

Source: trainer.py (GitHub)

copy

Full Screen

import logging
import time
from pathlib import Path

import torch

from deepbond import models
from deepbond import optimizer
from deepbond import scheduler
from deepbond.reporter import Reporter
from deepbond.stats import Stats

logger = logging.getLogger(__name__)


class Trainer:
    """Drives the full training procedure for a deepbond model.

    Responsibilities: per-epoch training, optional dev/test evaluation at
    configurable checkpoint intervals, checkpoint saving (best-only or
    periodic), early stopping on dev F1, best-model restoration, and final
    stats reporting via a Reporter.
    """

    def __init__(
        self,
        train_iter,
        model,
        optimizer,
        scheduler,
        options,
        dev_iter=None,
        test_iter=None
    ):
        """Store training components and read all knobs from `options`.

        Args:
            train_iter: training dataset iterator; must expose
                `dataset.fields['tags'].vocab.stoi` (torchtext-style).
            model: model with `forward`, `loss`, `predict_classes`.
            optimizer: deepbond optimizer wrapper (has `.step()`).
            scheduler: deepbond scheduler wrapper (has `.step()`).
            options: namespace with epochs, output_dir,
                dev_checkpoint_epochs, save_checkpoint_epochs,
                save_best_only, early_stopping_patience,
                restore_best_model, final_report, tensorboard.
            dev_iter: optional dev-set iterator; enables dev evaluation,
                best-only saving and early stopping.
            test_iter: optional test-set iterator.
        """
        self.model = model
        self.train_iter = train_iter
        self.dev_iter = dev_iter
        self.test_iter = test_iter
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.epochs = options.epochs
        self.output_dir = options.output_dir
        self.dev_checkpoint_epochs = options.dev_checkpoint_epochs
        self.save_checkpoint_epochs = options.save_checkpoint_epochs
        self.save_best_only = options.save_best_only
        self.early_stopping_patience = options.early_stopping_patience
        self.restore_best_model = options.restore_best_model
        self.current_epoch = 1
        # One dict per completed epoch, in order, for the final report.
        self.train_stats_history = []
        self.dev_stats_history = []
        self.test_stats_history = []
        self.final_report = options.final_report
        tags_vocab = train_iter.dataset.fields['tags'].vocab.stoi
        self.train_stats = Stats(tags_vocab)
        self.dev_stats = Stats(tags_vocab)
        self.test_stats = Stats(tags_vocab)
        self.reporter = Reporter(options.output_dir, options.tensorboard)

    def train(self):
        """Run the main loop from `current_epoch` to `epochs` inclusive.

        Before training, evaluates on dev/test (reported as epoch 0) when
        those iterators exist. Each epoch: train, optionally evaluate,
        optionally save, and check early stopping. Afterwards, logs total
        time and (if `final_report`) emits the accumulated histories.
        """
        # Baseline evaluation on dev set, reported as epoch 0.
        if self.dev_iter is not None:
            logger.info('Evaluating before training...')
            self.reporter.set_epoch(0)
            self.dev_epoch()
        # Baseline evaluation on test set, reported as epoch 0.
        if self.test_iter is not None:
            logger.info('Testing before training...')
            self.reporter.set_epoch(0)
            self.test_epoch()
        start_time = time.time()
        for epoch in range(self.current_epoch, self.epochs + 1):
            logger.info('Epoch {} of {}'.format(epoch, self.epochs))
            self.reporter.set_epoch(epoch)
            self.current_epoch = epoch

            # Train a single epoch
            logger.info('Training...')
            self.train_epoch()

            # Evaluate on dev set only at configured checkpoint epochs.
            if self.dev_iter is not None:
                if (self.dev_checkpoint_epochs > 0
                        and epoch % self.dev_checkpoint_epochs == 0):
                    logger.info('Evaluating...')
                    self.dev_epoch()

            # Evaluate on test set every epoch if available.
            if self.test_iter is not None:
                logger.info('Testing...')
                self.test_epoch()

            # Save strategy: best-only (on dev F1 improvement) or periodic.
            if self.save_best_only and self.dev_iter is not None:
                if self.dev_stats.best_prec_rec_f1.epoch == epoch:
                    logger.info('F1 improved on epoch {}'.format(epoch))
                    self.save(epoch)
            else:
                if (self.save_checkpoint_epochs > 0
                        and epoch % self.save_checkpoint_epochs == 0):
                    self.save(epoch)

            # Early stopping on dev F1 stagnation.
            if self.early_stopping_patience > 0 and self.dev_iter is not None:
                passed_epochs = epoch - self.dev_stats.best_prec_rec_f1.epoch
                # FIX: was `==`; `>=` still fires if the counter ever jumps
                # past the exact patience value (e.g. after resume()).
                if passed_epochs >= self.early_stopping_patience:
                    logger.info('Training stopped! No improvements on F1 '
                                'after {} epochs'.format(passed_epochs))
                    if self.restore_best_model:
                        if self.dev_stats.best_prec_rec_f1.epoch < epoch:
                            self.restore_epoch(self.dev_stats.best_prec_rec_f1.epoch)  # NOQA
                    break

            # Restore best model at the final epoch if early stopping
            # did not trigger.
            if epoch == self.epochs and self.dev_iter is not None:
                if self.restore_best_model:
                    if self.dev_stats.best_prec_rec_f1.epoch < epoch:
                        self.restore_epoch(self.dev_stats.best_prec_rec_f1.epoch)  # NOQA

        elapsed = time.time() - start_time
        hms = time.strftime("%Hh:%Mm:%Ss", time.gmtime(elapsed))
        logger.info('Training ended after {}'.format(hms))
        if self.final_report:
            logger.info('Training final report: ')
            self.reporter.report_stats_history(self.train_stats_history)
            if self.dev_iter:
                logger.info('Dev final report: ')
                self.reporter.report_stats_history(
                    self.dev_stats_history, start=0
                )
            if self.test_iter:
                logger.info('Test final report: ')
                self.reporter.report_stats_history(
                    self.test_stats_history, start=0
                )
        self.reporter.close()

    def train_epoch(self):
        """Train one epoch, record its stats dict, and report it."""
        self.reporter.set_mode('train')
        self.train_stats.reset()
        self._train()
        self.train_stats_history.append(self.train_stats.to_dict())
        self.reporter.report_stats(self.train_stats.to_dict())

    def dev_epoch(self):
        """Evaluate on the dev set, record its stats dict, and report it."""
        self.reporter.set_mode('dev')
        self.dev_stats.reset()
        self._eval(self.dev_iter, self.dev_stats)
        self.dev_stats_history.append(self.dev_stats.to_dict())
        self.reporter.report_stats(self.dev_stats.to_dict())

    def test_epoch(self):
        """Evaluate on the test set, record its stats dict, and report it."""
        self.reporter.set_mode('test')
        self.test_stats.reset()
        self._eval(self.test_iter, self.test_stats)
        self.test_stats_history.append(self.test_stats.to_dict())
        self.reporter.report_stats(self.test_stats.to_dict())

    def _train(self):
        """Single optimization pass over `train_iter` (model in train mode)."""
        self.model.train()
        for i, batch in enumerate(self.train_iter, start=1):
            # basic training steps:
            self.model.zero_grad()
            pred = self.model(batch)
            loss = self.model.loss(pred, batch.tags)
            loss.backward()
            self.optimizer.step()
            # keep stats object updated:
            pred_tags = self.model.predict_classes(batch)
            self.train_stats.update(loss.item(), pred_tags, batch.tags)
            # report current loss to the user:
            acum_loss = self.train_stats.get_loss()
            self.reporter.report_progress(i, len(self.train_iter), acum_loss)
        self.train_stats.calc(self.current_epoch)
        # scheduler.step() after training
        self.scheduler.step()

    def _eval(self, ds_iterator, stats):
        """Gradient-free pass over `ds_iterator`, accumulating into `stats`.

        Args:
            ds_iterator: dataset iterator yielding batches with `.tags`.
            stats: Stats instance to update and finalize for this epoch.
        """
        self.model.eval()
        with torch.no_grad():
            for i, batch in enumerate(ds_iterator, start=1):
                # basic prediction steps:
                pred = self.model(batch)
                loss = self.model.loss(pred, batch.tags)
                # keep stats object updated:
                pred_tags = self.model.predict_classes(batch)
                stats.update(loss.item(), pred_tags, batch.tags)
                # report current loss to the user:
                acum_loss = stats.get_loss()
                self.reporter.report_progress(i, len(ds_iterator), acum_loss)
            stats.calc(self.current_epoch)

    def save(self, current_epoch):
        """Persist model/optimizer/scheduler state under `epoch_<N>/`.

        Args:
            current_epoch: epoch number used to name the checkpoint dir.
        """
        epoch_dir = 'epoch_{}'.format(current_epoch)
        output_path = Path(self.output_dir, epoch_dir)
        # FIX: parents=True so saving works even when output_dir itself
        # has not been created yet (was: mkdir(exist_ok=True), which
        # raises FileNotFoundError on a missing parent).
        output_path.mkdir(parents=True, exist_ok=True)
        logger.info('Saving training state to {}'.format(output_path))
        models.save(output_path, self.model)
        optimizer.save(output_path, self.optimizer)
        scheduler.save(output_path, self.scheduler)

    def load(self, directory):
        """Restore model/optimizer/scheduler state from `directory`."""
        logger.info('Loading training state from {}'.format(directory))
        models.load_state(directory, self.model)
        optimizer.load_state(directory, self.optimizer)
        scheduler.load_state(directory, self.scheduler)

    def restore_epoch(self, epoch):
        """Load the checkpoint saved for `epoch` (the `epoch_<N>` dir)."""
        epoch_dir = 'epoch_{}'.format(epoch)
        self.load(str(Path(self.output_dir, epoch_dir)))

    def resume(self, epoch):
        """Restore checkpoint `epoch` and make `train()` continue from it.

        NOTE(review): train() starts at `current_epoch`, so the restored
        epoch is trained again; confirm whether `epoch + 1` is intended.
        """
        self.restore_epoch(epoch)
        self.current_epoch = epoch

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run locust automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful