How to use the load_from_dir method in autotest

Best Python code snippets using autotest_python
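Note that load_from_dir is not a single shared API: each snippet below comes from a different project, and each defines or imports its own loader that restores saved artifacts such as hyperparameters, checkpoints, or model weights from a directory. As a minimal sketch of the common pattern, the hypothetical helper below reads saved hyperparameters and locates a checkpoint for the caller to load (the file names hparams.json and model.ckpt are illustrative assumptions, not taken from any of the snippets):

import json
from pathlib import Path

def load_from_dir(save_dir):
    """Hypothetical sketch: restore hyperparameters and locate a
    checkpoint inside a directory. File names are illustrative."""
    save_dir = Path(save_dir)
    with open(save_dir / 'hparams.json') as f:
        hparams = json.load(f)  # Saved hyperparameter settings
    checkpoint = save_dir / 'model.ckpt'  # Weights for the caller to load
    if not checkpoint.exists():
        raise FileNotFoundError('No checkpoint found in {}'.format(save_dir))
    return hparams, checkpoint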

train.py

Source: train.py (GitHub)


import warnings
warnings.filterwarnings('ignore')
import os
import logging
import pickle
import importlib
import functools
from pathlib import Path
from argparse import Namespace

import torch
from texar.torch.run import Executor, metric, cond, action
from texar.torch.run.metric.summary import Average, RunningAverage
from torch.optim.adam import Adam
from torch.optim.lr_scheduler import LambdaLR
from typing import Any, Callable, List, Optional, Tuple

from .data import create_texar_dataset, load_tokenizer, TexarDataset
from .construct import create_model, load_hparams
from .metric import RawMetric
from .args import get_train_parser
from .model.lm import LM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
logger = logging.getLogger(__name__)


def get_lr_scaler(step: int, warmup_steps: int):
    """Noam learning rate schedule described in
    https://arxiv.org/pdf/1706.03762.pdf.
    """
    if step <= warmup_steps:
        lr_scaler = min(1.0, step / warmup_steps)  # Linear warmup
    else:
        lr_scaler = 0.9 ** (step / warmup_steps)  # Exponential decay
    return lr_scaler


def create_optimizer_and_scheduler(model: LM,
                                   learning_rate: float = 0.001,
                                   dynamic_lr: bool = False,
                                   warmup_steps: int = 4000
                                   ) -> Tuple[Adam, LambdaLR]:
    # Define optimizer and scheduler
    if dynamic_lr:
        lr_scaler_fn = functools.partial(get_lr_scaler, warmup_steps=warmup_steps)
    else:
        lr_scaler_fn = lambda _: 1.0
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                                 betas=(0.9, 0.998), eps=1e-9)
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_scaler_fn)
    return optimizer, lr_scheduler


def load_metrics(pg_metric_names: List[Any],
                 eval_metric_names: List[Any],
                 text_decode_fn: Callable
                 ) -> Tuple[List[Any], List[Any]]:
    metric_fns_map = {metric_name:
                      RawMetric.get_metric_from_name(metric_name)(decode_fn=text_decode_fn)
                      for metric_name in set(pg_metric_names + eval_metric_names)}
    pg_metric_fns = [metric_fns_map[metric_name] for metric_name in pg_metric_names]
    eval_metric_fns = [metric_fns_map[metric_name] for metric_name in eval_metric_names]
    return pg_metric_fns, eval_metric_fns


def create_texar_metrics(pg_metric_fns: List[Any],
                         eval_metric_fns: List[Any],
                         log_iterations: int = 100
                         ) -> Tuple[List[RunningAverage], List[Average]]:
    train_metrics = [metric.RunningAverage(log_iterations),
                     metric.RunningAverage(log_iterations, pred_name='mle_loss'),
                     *(metric.RunningAverage(log_iterations,
                                             pred_name='{}_loss'.format(metric_fn.metric_name))
                       for metric_fn in pg_metric_fns)]
    if eval_metric_fns:
        eval_metrics = [*(metric.Average(pred_name=metric_fn.metric_name,
                                         higher_is_better=metric_fn.higher_is_better)
                          for metric_fn in eval_metric_fns),
                        metric.Average(pred_name='mle_loss')]
    else:
        eval_metrics = [metric.Average(pred_name='mle_loss')]
    return train_metrics, eval_metrics


def create_executor(model: LM,
                    save_dir: str,
                    train_data: TexarDataset,
                    eval_data: TexarDataset,
                    optimizer: Adam,
                    lr_scheduler: LambdaLR,
                    load_from_dir: Optional[str] = None,
                    train_metrics: List[RunningAverage] = [],
                    eval_metrics: List[Average] = [],
                    max_grad_norm: float = 5.0,
                    accum_steps: int = 1,
                    max_epochs: int = 100,
                    patience: int = 2,
                    valid_iterations: int = 1000,
                    valid_epoch_end: bool = False,
                    log_iterations: int = 100
                    ) -> Executor:
    # Use Executor API to train and validate
    executor = Executor(model=model,
                        train_data=train_data,
                        valid_data=eval_data,
                        test_data=eval_data,
                        optimizer=optimizer,
                        lr_scheduler=lr_scheduler,
                        grad_clip=max_grad_norm,
                        num_iters_per_update=accum_steps,
                        stop_training_on=cond.epoch(max_epochs),
                        log_every=cond.iteration(log_iterations),
                        validate_mode='eval',
                        test_mode='eval',
                        validate_every=([cond.epoch(1),
                                         cond.iteration(valid_iterations)] if valid_epoch_end
                                        else [cond.iteration(valid_iterations)]),
                        train_metrics=train_metrics + [metric.LR(optimizer)],
                        valid_metrics=eval_metrics,
                        save_every=cond.validation(better=True),
                        plateau_condition=[
                            cond.consecutive(cond.validation(better=False), 1)],
                        action_on_plateau=[
                            action.early_stop(patience=patience)],
                        max_to_keep=1,
                        checkpoint_dir=save_dir)
    if load_from_dir is not None:
        # Seems like a bug where the checkpoint is only looked for in
        # executor.checkpoint_dir. Get around this by temporarily changing
        # checkpoint_dir to load_from_dir, then changing it back to save_dir.
        executor.checkpoint_dir = Path(load_from_dir)
        loaded_checkpoint = executor.load()
        logger.info("Initialized model from checkpoint {}".format(loaded_checkpoint))
        executor.checkpoint_dir = Path(save_dir)
    return executor


def train(args: Namespace,
          train_src_texts: List[str],
          train_tgt_texts: List[str],
          eval_src_texts: List[str],
          eval_tgt_texts: List[str],
          train_ref_texts: Optional[List[str]] = None,
          eval_ref_texts: Optional[List[str]] = None
          ) -> None:
    if args.load_from_dir:
        logger.info(("Loading model configuration from {}. " +
                     "All hyperparameter settings will be read from here " +
                     "and will override any settings provided as command-line arguments.").format(
                         os.path.join(args.load_from_dir)))
        model_hparams = load_hparams(path=args.load_from_dir)
    else:
        assert args.config_file is not None
        model_hparams = load_hparams(path=args.config_file)
    tokenizer = load_tokenizer(model_hparams['tokenizer'])
    # Make dataset objects for Texar API
    texar_train_data, texar_eval_data = create_texar_dataset(
        tokenizer=tokenizer,
        train_src_texts=train_src_texts,
        train_tgt_texts=train_tgt_texts,
        eval_src_texts=eval_src_texts,
        eval_tgt_texts=eval_tgt_texts,
        train_ref_texts=train_ref_texts,
        eval_ref_texts=eval_ref_texts,
        max_src_length=args.max_src_length,
        max_tgt_length=args.max_tgt_length,
        batch_size=args.batch_size
    )
    # Initialize the policy gradient and evaluation metrics based on given metric names
    pg_metric_fns, eval_metric_fns = load_metrics(
        pg_metric_names=args.pg_metrics,
        eval_metric_names=args.eval_metrics,
        text_decode_fn=(lambda text:
                        tokenizer.map_id_to_text(text, skip_special_tokens=True))
    )
    # Initialize model with properties and policy gradient/evaluation functions
    model = create_model(hparams=model_hparams,
                         save_dir=args.save_dir)
    model.set_pg_and_eval_fns(pg_metric_fns, eval_metric_fns)
    # Create optimizer and LR scheduler
    optimizer, lr_scheduler = create_optimizer_and_scheduler(model,
                                                             learning_rate=args.learning_rate,
                                                             dynamic_lr=args.dynamic_lr,
                                                             warmup_steps=args.warmup_steps)
    # Wrap metrics inside Texar Metric API to be used by Executor
    (texar_train_metrics,
     texar_eval_metrics) = create_texar_metrics(pg_metric_fns=pg_metric_fns,
                                                eval_metric_fns=eval_metric_fns,
                                                log_iterations=args.log_iterations)
    # Create Texar Executor
    executor = create_executor(model=model,
                               save_dir=args.save_dir,
                               train_data=texar_train_data,
                               eval_data=texar_eval_data,
                               optimizer=optimizer,
                               lr_scheduler=lr_scheduler,
                               load_from_dir=args.load_from_dir,
                               train_metrics=texar_train_metrics,
                               eval_metrics=texar_eval_metrics,
                               max_grad_norm=args.max_grad_norm,
                               accum_steps=args.accum_steps,
                               max_epochs=args.max_epochs,
                               patience=args.patience,
                               valid_iterations=args.valid_iterations,
                               valid_epoch_end=args.valid_epoch_end,
                               log_iterations=args.log_iterations)
    logger.info("Validation result prior to training:")
    executor.test()
    # ... (truncated in source)
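In create_executor above, the load_from_dir branch works around Executor.load() searching only executor.checkpoint_dir: it temporarily points checkpoint_dir at the directory holding the pretrained checkpoint, loads it, then restores checkpoint_dir so new checkpoints are written to save_dir. Extracted as a standalone sketch, assuming a texar-pytorch Executor constructed as in the snippet:

from pathlib import Path

def load_checkpoint_from(executor, load_from_dir, save_dir):
    # Point the Executor at the pretrained checkpoint's directory...
    executor.checkpoint_dir = Path(load_from_dir)
    loaded_checkpoint = executor.load()  # load() reads from checkpoint_dir
    # ...then restore it so future checkpoints land in save_dir
    executor.checkpoint_dir = Path(save_dir)
    return loaded_checkpoint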


evaluation.py

Source: evaluation.py (GitHub)


# ... (truncated in source)
from torch_tools.visualization import to_image
from visualization import interpolate
from loading import load_from_dir

# %matplotlib inline

# deformator, G, shift_predictor = load_from_dir(
#     './models/pretrained/deformators/SN_MNIST/',
#     G_weights='./models/pretrained/generators/SN_MNIST/')
deformator, G, shift_predictor = load_from_dir(
    './models/pretrained/deformators/SN_Anime/',
    G_weights='./models/pretrained/generators/SN_Anime/')
# deformator, G, shift_predictor = load_from_dir(
#     './models/pretrained/deformators/BigGAN/',
#     G_weights='./models/pretrained/generators/BigGAN/G_ema.pth')
# deformator, G, shift_predictor = load_from_dir(
#     './models/pretrained/deformators/ProgGAN/',
#     G_weights='./models/pretrained/generators/ProgGAN/100_celeb_hq_network-snapshot-010403.pth')
# deformator, G, shift_predictor = load_from_dir(
#     './models/pretrained/deformators/StyleGAN2/',
#     G_weights='./models/pretrained/generators/StyleGAN2/stylegan2-ffhq-config-f.pt')

discovered_annotation = ''
for d in deformator.annotation.items():
    discovered_annotation += '{}: {}\n'.format(d[0], d[1])
print('human-annotated directions:\n' + discovered_annotation)

rows = 8
plt.figure(figsize=(5, rows), dpi=250)  # plt presumably imported in the truncated lines above

# set desired class for conditional GAN
# if is_conditional(G):
#     G.set_classes(12)

annotated = list(deformator.annotation.values())
inspection_dim = annotated[0]
# zs = torch.randn([rows, G.dim_z] if type(G.dim_z) == int else [rows] + G.dim_z, device='cuda')
# ... (truncated in source)
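Here load_from_dir comes from the project's own loading module and returns three trained components (deformator, G, shift_predictor), each family of pretrained weights stored under its own directory. A hypothetical sketch of that one-file-per-component layout using plain torch.load follows; the file names and the state-dict-only format are assumptions for illustration, not the project's actual storage scheme:

import os
import torch

def load_components_from_dir(root, names=('deformator', 'G', 'shift_predictor')):
    # Each component's weights are assumed to live in '<name>.pt' under root
    states = {}
    for name in names:
        path = os.path.join(root, '{}.pt'.format(name))
        # map_location='cpu' keeps the sketch runnable without a GPU
        states[name] = torch.load(path, map_location='cpu')
    return states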


visualize.py

Source: visualize.py (GitHub)


import time
import math
import os
import sys

if len(sys.argv) < 2:
    print('Usage: {} datasource'.format(sys.argv[0]))
    print('\tAvailable datasources: boxes, minipong, mediumpong...')
    exit(1)

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import imutil
from logutil import TimeSeries
from tqdm import tqdm
from spatial_recurrent import CSRN
from coordconv import CoordConv2d
from higgins import higgins_metric
from importlib import import_module

datasource = import_module('envs.' + sys.argv[1])
import models


def main():
    batch_size = 16
    latent_dim = 12
    true_latent_dim = 4
    num_actions = 4
    encoder = models.Encoder(latent_dim)
    decoder = models.Decoder(latent_dim)
    transition = models.Transition(latent_dim, num_actions)
    blur = models.GaussianSmoothing(channels=3, kernel_size=11, sigma=4.)
    higgins_scores = []
    # load_from_dir = '/mnt/nfs/experiments/demo_2018_12_12/scm-gan_81bd12cd'
    load_from_dir = '.'
    print('Loading models from directory {}'.format(load_from_dir))
    encoder.load_state_dict(torch.load(os.path.join(load_from_dir, 'model-encoder.pth')))
    decoder.load_state_dict(torch.load(os.path.join(load_from_dir, 'model-decoder.pth')))
    transition.load_state_dict(torch.load(os.path.join(load_from_dir, 'model-transition.pth')))
    encoder.eval()
    decoder.eval()
    transition.eval()
    for model in (encoder, decoder, transition):
        for child in model.children():
            if type(child) == nn.BatchNorm2d or type(child) == nn.BatchNorm1d:
                child.momentum = 0
    states, rewards, dones, actions = datasource.get_trajectories(batch_size, timesteps=1)
    states = torch.Tensor(states).cuda()
    # Reconstruct the first timestep
    reconstructed = decoder(encoder(states[:, 0]))
    imutil.show(reconstructed)

if __name__ == '__main__':
    ...  # truncated in source
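This snippet spells out the loading loop by hand: each module's state dict is read from load_from_dir under a model-<name>.pth file, the modules are switched to eval mode, and BatchNorm momentum is zeroed so running statistics stay frozen during visualization. The same steps factored into a reusable helper (the file-name pattern follows the snippet; the helper itself is illustrative):

import os
import torch
import torch.nn as nn

def restore_models(load_from_dir, **models):
    for name, model in models.items():
        path = os.path.join(load_from_dir, 'model-{}.pth'.format(name))
        model.load_state_dict(torch.load(path, map_location='cpu'))
        model.eval()
        # modules() walks nested submodules, unlike the top-level
        # children() loop in the snippet above
        for child in model.modules():
            if isinstance(child, (nn.BatchNorm1d, nn.BatchNorm2d)):
                child.momentum = 0
    return models

With the models from the snippet, this would be called as restore_models('.', encoder=encoder, decoder=decoder, transition=transition).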


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing for FREE!

