How to use test_tds method in Molotov

Best Python code snippets using molotov_python
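Note that in the snippets below, test_tds is an ordinary Python variable rather than a documented Molotov API: the SENN training scripts use it for the raw test dataset returned alongside the DataLoaders, while the django-durationfield tests use it for a list of timedelta fixtures. A minimal sketch of both roles (the MNIST path and the fixture values here are illustrative, not taken from the snippets):

# Minimal sketch of the two roles "test_tds" plays in the snippets below.
# Assumes torch/torchvision are installed; paths and values are illustrative.
from datetime import timedelta

from torchvision import transforms
from torchvision.datasets import MNIST

# Role 1 (SENN scripts): the raw test dataset returned next to the loaders.
test_tds = MNIST('data/MNIST', train=False, download=True,
                 transform=transforms.ToTensor())
x, y = test_tds[0]   # torchvision datasets support integer indexing
print(x.shape, y)    # torch.Size([1, 28, 28]) and an integer label

# Role 2 (django-durationfield tests): a list of timedelta fixtures.
test_tds = [timedelta(hours=1), timedelta(days=1, microseconds=1)]
for td in test_tds:
    print(td.days, td.seconds, td.microseconds)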

main_cifar.py

Source: main_cifar.py (GitHub)

# -*- coding: utf-8 -*-
""" Code for training and evaluating Self-Explaining Neural Networks.
Copyright (C) 2018 David Alvarez-Melis <dalvmel@mit.edu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License,
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
# Standard Imports
import sys, os
import numpy as np
import pdb
import pickle
import argparse
import operator
import matplotlib
import matplotlib.pyplot as plt

# Torch Imports
import torch
from torch.utils.data import TensorDataset
from torch.autograd import Variable
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torch.utils.data.sampler import SubsetRandomSampler
import torch.utils.data.dataloader as dataloader

# Imports from my other repos
from robust_interpret.explainers import gsenn_wrapper
from robust_interpret.utils import lipschitz_boxplot, lipschitz_argmax_plot

# Local imports
from SENN.arglist import get_senn_parser  # parse_args as parse_senn_args
from SENN.models import GSENN
from SENN.conceptizers import image_fcc_conceptizer, image_cnn_conceptizer, input_conceptizer
from SENN.parametrizers import image_parametrizer, torchvision_parametrizer, vgg_parametrizer
from SENN.aggregators import linear_scalar_aggregator, additive_scalar_aggregator
from SENN.trainers import HLearningClassTrainer, VanillaClassTrainer, GradPenaltyTrainer
from SENN.utils import plot_theta_stability, generate_dir_names, noise_stability_plots, concept_grid
from SENN.eval_utils import estimate_dataset_lipschitz


def load_cifar_data(valid_size=0.1, shuffle=True, resize=None, random_seed=2008,
                    batch_size=64, num_workers=1):
    """
    We return train and test for plots and post-training experiments
    """
    transf_seq = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]
    if resize and (resize[0] != 32 or resize[1] != 32):
        transf_seq.insert(0, transforms.Resize(resize))
    transform = transforms.Compose(transf_seq)
    # normalized according to pytorch torchvision guidelines https://chsasank.github.io/vision/models.html
    train = CIFAR10('data/CIFAR', train=True, download=True, transform=transform)
    test = CIFAR10('data/CIFAR', train=False, download=True, transform=transform)
    num_train = len(train)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    if shuffle == True:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    # Create DataLoader
    dataloader_args = dict(batch_size=batch_size, num_workers=num_workers)
    train_loader = dataloader.DataLoader(train, sampler=train_sampler, **dataloader_args)
    valid_loader = dataloader.DataLoader(train, sampler=valid_sampler, **dataloader_args)
    dataloader_args['shuffle'] = False
    test_loader = dataloader.DataLoader(test, **dataloader_args)
    return train_loader, valid_loader, test_loader, train, test


def parse_args():
    senn_parser = get_senn_parser()
    ### Local ones
    parser = argparse.ArgumentParser(parents=[senn_parser], add_help=False,
                                     description='Interpretability robustness evaluation on MNIST')
    # #setup
    parser.add_argument('-d', '--datasets', nargs='+',
                        default=['heart', 'ionosphere', 'breast-cancer', 'wine', 'heart',
                                 'glass', 'diabetes', 'yeast', 'leukemia', 'abalone'],
                        help='<Required> Set flag')
    parser.add_argument('--lip_calls', type=int, default=10,
                        help='ncalls for bayes opt gp method in Lipschitz estimation')
    parser.add_argument('--lip_eps', type=float, default=0.01,
                        help='eps for Lipschitz estimation')
    parser.add_argument('--lip_points', type=int, default=100,
                        help='sample size for dataset Lipschitz estimation')
    parser.add_argument('--optim', type=str, default='gp',
                        help='black-box optimization method')
    #####
    args = parser.parse_args()
    print("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        print("\t{}={}".format(attr.upper(), value))
    return args


def main():
    args = parse_args()
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    args.nclasses = 10
    args.theta_dim = args.nclasses
    if (args.theta_arch == 'simple') or ('vgg' in args.theta_arch):
        H, W = 32, 32
    else:
        # Need to resize to have access to torchvision's models
        H, W = 224, 224
    args.input_dim = H * W
    model_path, log_path, results_path = generate_dir_names('cifar', args)
    train_loader, valid_loader, test_loader, train_tds, test_tds = load_cifar_data(
        batch_size=args.batch_size, num_workers=args.num_workers,
        resize=(H, W)
    )
    if args.h_type == 'input':
        conceptizer = input_conceptizer()
        args.nconcepts = args.input_dim + int(not args.nobias)
    elif args.h_type == 'cnn':
        # bias. They treat it like any other concept.
        # args.nconcepts += int(not args.nobias)
        conceptizer = image_cnn_conceptizer(args.input_dim, args.nconcepts, args.concept_dim, nchannel=3)  # , sparsity = sparsity_l)
    else:
        # args.nconcepts += int(not args.nobias)
        conceptizer = image_fcc_conceptizer(args.input_dim, args.nconcepts, args.concept_dim, nchannel=3)  # , sparsity = sparsity_l)
    if args.theta_arch == 'simple':
        parametrizer = image_parametrizer(args.input_dim, args.nconcepts, args.theta_dim, nchannel=3, only_positive=args.positive_theta)
    elif 'vgg' in args.theta_arch:
        parametrizer = vgg_parametrizer(args.input_dim, args.nconcepts, args.theta_dim, arch=args.theta_arch, nchannel=3, only_positive=args.positive_theta)  # torchvision.models.alexnet(num_classes = args.nconcepts*args.theta_dim)
    else:
        parametrizer = torchvision_parametrizer(args.input_dim, args.nconcepts, args.theta_dim, arch=args.theta_arch, nchannel=3, only_positive=args.positive_theta)  # torchvision.models.alexnet(num_classes = args.nconcepts*args.theta_dim)
    aggregator = additive_scalar_aggregator(args.concept_dim, args.nclasses)
    model = GSENN(conceptizer, parametrizer, aggregator)  # , learn_h = args.train_h)
    # if not args.train and args.load_model:
    #     checkpoint = torch.load(os.path.join(model_path,'model_best.pth.tar'), map_location=lambda storage, loc: storage)
    #     checkpoint.keys()
    #     model = checkpoint['model']
    #
    #
    if args.theta_reg_type in ['unreg', 'none', None]:
        trainer = VanillaClassTrainer(model, args)
    elif args.theta_reg_type == 'grad1':
        trainer = GradPenaltyTrainer(model, args, typ=1)
    elif args.theta_reg_type == 'grad2':
        trainer = GradPenaltyTrainer(model, args, typ=2)
    elif args.theta_reg_type == 'grad3':
        trainer = GradPenaltyTrainer(model, args, typ=3)
    elif args.theta_reg_type == 'crosslip':
        trainer = CLPenaltyTrainer(model, args)  # NOTE: CLPenaltyTrainer is not imported above
    else:
        raise ValueError('Unrecognized theta_reg_type')
    if args.train or not args.load_model or (not os.path.isfile(os.path.join(model_path, 'model_best.pth.tar'))):
        trainer.train(train_loader, valid_loader, epochs=args.epochs, save_path=model_path)
        trainer.plot_losses(save_path=results_path)
    else:
        checkpoint = torch.load(os.path.join(model_path, 'model_best.pth.tar'), map_location=lambda storage, loc: storage)
        checkpoint.keys()
        model = checkpoint['model']
        trainer = VanillaClassTrainer(model, args)  # arbitrarily trained, only need to compute val acc
    # trainer.validate(test_loader, fold = 'test')
    model.eval()
    All_Results = {}

    ### 0. Concept Grid for Visualization
    # concept_grid(model, test_loader, top_k = 10, cuda = args.cuda, save_path = results_path + '/concept_grid.pdf')

    ### 1. Single point lipschitz estimate via black box optim (for fair comparison)
    # with other methods in which we have to use BB optimization.
    features = None
    classes = [str(i) for i in range(10)]
    expl = gsenn_wrapper(model,
                         mode='classification',
                         input_type='image',
                         multiclass=True,
                         feature_names=features,
                         class_names=classes,
                         train_data=train_loader,
                         skip_bias=True,
                         verbose=False)
    ### Debug single input
    # x = next(iter(train_tds))[0]
    # attr = expl(x, show_plot = False)
    # pdb.set_trace()
    # #### Debug multi input
    # x = next(iter(test_loader))[0] # Transformed
    # x_raw = test_loader.dataset.test_data[:args.batch_size,:,:]
    # attr = expl(x, x_raw = x_raw, show_plot = True)
    # #pdb.set_trace()
    # #### Debug argmax plot_theta_stability
    if args.h_type == 'input':
        x = next(iter(test_tds))[0].numpy()
        y = next(iter(test_tds))[0].numpy()
        x_raw = (test_tds.test_data[0].float() / 255).numpy()
        y_raw = revert_to_raw(x)  # NOTE: revert_to_raw is defined in main_mnist.py, not in this file
        att_x = expl(x, show_plot=False)
        att_y = expl(y, show_plot=False)
        lip = 1
        lipschitz_argmax_plot(x_raw, y_raw, att_x, att_y, lip)  # save_path=fpath)
        # pdb.set_trace()

    ### 2. Single example lipschitz estimate with Black Box
    do_bb_stability_example = True
    if do_bb_stability_example:
        print('**** Performing lipschitz estimation for a single point ****')
        idx = 0
        print('Example index: {}'.format(idx))
        # x = train_tds[idx][0].view(1,28,28).numpy()
        x = next(iter(test_tds))[0].numpy()
        # x_raw = (test_tds.test_data[0].float()/255).numpy()
        x_raw = (test_tds.test_data[0] / 255)
        # x_raw = next(iter(train_tds))[0]
        # args.optim = 'gp'
        # args.lip_eps = 0.1
        # args.lip_calls = 10
        Results = {}
        lip, argmax = expl.local_lipschitz_estimate(x, bound_type='box_std',
                                                    optim=args.optim,
                                                    eps=args.lip_eps,
                                                    n_calls=4 * args.lip_calls,
                                                    njobs=1,
                                                    verbose=2)
        # pdb.set_trace()
        Results['lip_argmax'] = (x, argmax, lip)
        # .reshape(inputs.shape[0], inputs.shape[1], -1)
        att = expl(x, None, show_plot=False)  # .squeeze()
        # .reshape(inputs.shape[0], inputs.shape[1], -1)
        att_argmax = expl(argmax, None, show_plot=False)  # .squeeze()
        # pdb.set_trace()
        Argmax_dict = {'lip': lip, 'argmax': argmax, 'x': x}
        fpath = os.path.join(results_path, 'argmax_lip_gp_senn.pdf')
        if args.h_type == 'input':
            lipschitz_argmax_plot(x_raw, revert_to_raw(argmax), att, att_argmax, lip, save_path=fpath)
        pickle.dump(Argmax_dict, open(
            results_path + '/argmax_lip_gp_senn.pkl', "wb"))

    # noise_stability_plots(model, test_tds, cuda = args.cuda, save_path = results_path)

    ### 3. Local lipschitz estimate over multiple samples with Black Box Optim
    do_bb_stability = True
    if do_bb_stability:
        print('**** Performing black-box lipschitz estimation over subset of dataset ****')
        maxpoints = 20
        # valid_loader is shuffled, so this is like a random choice
        mini_test = next(iter(valid_loader))[0][:maxpoints].numpy()
        lips = expl.estimate_dataset_lipschitz(mini_test,
                                               n_jobs=-1, bound_type='box_std',
                                               eps=args.lip_eps, optim=args.optim,
                                               n_calls=args.lip_calls, verbose=2)
        Stability_dict = {'lips': lips}
        pickle.dump(Stability_dict, open(results_path + '_stability_blackbox.pkl', "wb"))
        All_Results['stability_blackbox'] = lips

    pickle.dump(All_Results, open(results_path + '_combined_metrics.pkl', "wb"))
    # args.epoch_stats = epoch_stats
    # save_path = args.results_path
    # print("Save train/dev results to", save_path)
    # args_dict = vars(args)
    # pickle.dump(args_dict, open(save_path,'wb') )

if __name__ == '__main__':...
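A usage note for the snippet above: the last two values returned by load_cifar_data are the raw torchvision datasets, so test_tds is the untouched CIFAR-10 test set. A short sketch of how it can be consumed, assuming the file above is importable as a module named main_cifar (that name is an assumption) and data/CIFAR is writable:

# Hypothetical usage sketch; "main_cifar" as an importable module is assumed.
from main_cifar import load_cifar_data

train_loader, valid_loader, test_loader, train_tds, test_tds = load_cifar_data(
    batch_size=64, num_workers=1)
x, y = test_tds[0]                 # CIFAR10 yields (image, label) pairs
print(len(test_tds), x.shape, y)   # 10000 torch.Size([3, 32, 32]) <int label>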

main_mnist.py

Source: main_mnist.py (GitHub)

# -*- coding: utf-8 -*-
""" Code for training and evaluating Self-Explaining Neural Networks.
Copyright (C) 2018 David Alvarez-Melis <dalvmel@mit.edu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License,
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
# Standard Imports
import sys, os
import numpy as np
import pdb
import pickle
import argparse
import operator
import matplotlib
import matplotlib.pyplot as plt

# Torch-related
import torch
from torch.utils.data import TensorDataset
from torch.autograd import Variable
import torchvision
from torchvision import transforms
from torchvision.datasets import MNIST
from torch.utils.data.sampler import SubsetRandomSampler
import torch.utils.data.dataloader as dataloader

# Local imports
from SENN.arglist import get_senn_parser  # parse_args as parse_senn_args
from SENN.models import GSENN
from SENN.conceptizers import image_fcc_conceptizer, image_cnn_conceptizer, input_conceptizer
from SENN.parametrizers import image_parametrizer
from SENN.aggregators import linear_scalar_aggregator, additive_scalar_aggregator
from SENN.trainers import HLearningClassTrainer, VanillaClassTrainer, GradPenaltyTrainer
from SENN.utils import plot_theta_stability, generate_dir_names, noise_stability_plots, concept_grid, plot_digit, plot_concept_vis
from SENN.eval_utils import estimate_dataset_lipschitz
from robust_interpret.explainers import gsenn_wrapper
from robust_interpret.utils import lipschitz_boxplot, lipschitz_argmax_plot


def revert_to_raw(t):
    return ((t * .3081) + .1307)


def load_mnist_data(valid_size=0.1, shuffle=True, random_seed=2008, batch_size=64,
                    num_workers=1):
    """
    We return train and test for plots and post-training experiments
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train = MNIST('data/MNIST', train=True, download=True, transform=transform)
    test = MNIST('data/MNIST', train=False, download=True, transform=transform)
    num_train = len(train)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    if shuffle == True:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    # Create DataLoader
    dataloader_args = dict(batch_size=batch_size, num_workers=num_workers)
    train_loader = dataloader.DataLoader(train, sampler=train_sampler, **dataloader_args)
    valid_loader = dataloader.DataLoader(train, sampler=valid_sampler, **dataloader_args)
    dataloader_args['shuffle'] = False
    test_loader = dataloader.DataLoader(test, **dataloader_args)
    return train_loader, valid_loader, test_loader, train, test


def parse_args():
    senn_parser = get_senn_parser()
    ### Local ones
    parser = argparse.ArgumentParser(parents=[senn_parser], add_help=False,
                                     description='Interpretability robustness evaluation on MNIST')
    # #setup
    parser.add_argument('-d', '--datasets', nargs='+',
                        default=['heart', 'ionosphere', 'breast-cancer', 'wine', 'heart',
                                 'glass', 'diabetes', 'yeast', 'leukemia', 'abalone'],
                        help='<Required> Set flag')
    parser.add_argument('--lip_calls', type=int, default=10,
                        help='ncalls for bayes opt gp method in Lipschitz estimation')
    parser.add_argument('--lip_eps', type=float, default=0.01,
                        help='eps for Lipschitz estimation')
    parser.add_argument('--lip_points', type=int, default=100,
                        help='sample size for dataset Lipschitz estimation')
    parser.add_argument('--optim', type=str, default='gp',
                        help='black-box optimization method')
    parser.add_argument('--autovis', action='store_true', default=False,
                        help='Visualize the autoencoder / activations')
    #####
    args = parser.parse_args()
    print("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        print("\t{}={}".format(attr.upper(), value))
    return args


def main():
    args = parse_args()
    args.nclasses = 10
    args.theta_dim = args.nclasses
    model_path, log_path, results_path = generate_dir_names('mnist', args)
    train_loader, valid_loader, test_loader, train_tds, test_tds = load_mnist_data(
        batch_size=args.batch_size, num_workers=args.num_workers
    )
    if args.autovis:
        checkpoint = torch.load(os.path.join(model_path, 'model_best.pth.tar'), map_location=lambda storage, loc: storage)
        checkpoint.keys()
        model = checkpoint['model']
        model.eval()
        with torch.no_grad():
            plot_concept_vis(model, args.nconcepts, 5, 4)
        exit()
    # print(next(iter(test_tds))[0].shape)
    # exit()
    if args.h_type == 'input':
        conceptizer = input_conceptizer()
        args.nconcepts = 28 * 28 + int(not args.nobias)
    elif args.h_type == 'cnn':
        # args.nconcepts += int(not args.nobias)
        conceptizer = image_cnn_conceptizer(28 * 28, args.nconcepts, args.concept_dim)  # , sparsity = sparsity_l)
    else:
        # args.nconcepts += int(not args.nobias)
        conceptizer = image_fcc_conceptizer(28 * 28, args.nconcepts, args.concept_dim)  # , sparsity = sparsity_l)
    parametrizer = image_parametrizer(28 * 28, args.nconcepts, args.theta_dim, only_positive=args.positive_theta)
    aggregator = additive_scalar_aggregator(args.concept_dim, args.nclasses)
    model = GSENN(conceptizer, parametrizer, aggregator)  # , learn_h = args.train_h)
    # if args.load_model:
    #     checkpoint = torch.load(os.path.join(model_path,'model_best.pth.tar'), map_location=lambda storage, loc: storage)
    #     checkpoint.keys()
    #     model = checkpoint['model']
    #     model.eval()
    #
    #     accum = get_concept_vis(model, args.nconcepts)
    #     print(accum)
    #     print(model)
    #     x = next(iter(test_tds))[0]
    #     x = test_tds[1][0]
    #     print(x.shape)
    #     plot_digit()
    #     plot_digit(x)
    #     x = x[None, :, :, :]
    #     print(x.size())
    #     with torch.no_grad():
    #         enc = model.conceptizer.encode(x).squeeze()[:, None]
    #     print(enc.shape)
    #     plot_digit(enc)
    #     plt.show()
    #
    #     exit()
    if args.theta_reg_type in ['unreg', 'none', None]:
        trainer = VanillaClassTrainer(model, args)
    elif args.theta_reg_type == 'grad1':
        trainer = GradPenaltyTrainer(model, args, typ=1)
    elif args.theta_reg_type == 'grad2':
        trainer = GradPenaltyTrainer(model, args, typ=2)
    elif args.theta_reg_type == 'grad3':
        trainer = GradPenaltyTrainer(model, args, typ=3)
    elif args.theta_reg_type == 'crosslip':
        trainer = CLPenaltyTrainer(model, args)  # NOTE: CLPenaltyTrainer is not imported above
    else:
        raise ValueError('Unrecognized theta_reg_type')
    if not args.load_model and args.train:
        trainer.train(train_loader, valid_loader, epochs=args.epochs, save_path=model_path)
        trainer.plot_losses(save_path=results_path)
    else:
        checkpoint = torch.load(os.path.join(model_path, 'model_best.pth.tar'), map_location=lambda storage, loc: storage)
        checkpoint.keys()
        model = checkpoint['model']
        trainer = VanillaClassTrainer(model, args)
    trainer.validate(test_loader, fold='test')
    All_Results = {}

    ### 1. Single point lipschitz estimate via black box optim
    # All methods tested with BB optim for fair comparison
    features = None
    classes = [str(i) for i in range(10)]
    model.eval()
    if args.cuda:
        model.cpu()
    expl = gsenn_wrapper(model,
                         mode='classification',
                         input_type='image',
                         multiclass=True,
                         feature_names=features,
                         class_names=classes,
                         train_data=train_loader,
                         skip_bias=True,
                         verbose=False)
    #### Debug single input
    # x = next(iter(train_tds))[0]
    # attr = expl(x, show_plot = False)
    # pdb.set_trace()
    # #### Debug multi input
    # x = next(iter(test_loader))[0] # Transformed
    # x_raw = test_loader.dataset.test_data[:args.batch_size,:,:]
    # attr = expl(x, x_raw = x_raw, show_plot = True)
    # #pdb.set_trace()
    # #### Debug argmax plot_theta_stability
    if args.h_type == 'input':
        x = next(iter(test_tds))[0].numpy()
        y = next(iter(test_tds))[0].numpy()
        x_raw = (test_tds.test_data[0].float() / 255).numpy()
        y_raw = revert_to_raw(x)
        att_x = expl(x, show_plot=False)
        att_y = expl(y, show_plot=False)
        lip = 1
        lipschitz_argmax_plot(x_raw, y_raw, att_x, att_y, lip)  # save_path=fpath)
        # pdb.set_trace()

    ### 2. Single example lipschitz estimate with Black Box
    do_bb_stability_example = True
    if do_bb_stability_example:
        print('**** Performing lipschitz estimation for a single point ****')
        idx = 0
        print('Example index: {}'.format(idx))
        # x = train_tds[idx][0].view(1,28,28).numpy()
        x = next(iter(test_tds))[0].numpy()
        x_raw = (test_tds.test_data[0].float() / 255).numpy()
        # x_raw = next(iter(train_tds))[0]
        # args.optim = 'gp'
        # args.lip_eps = 0.1
        # args.lip_calls = 10
        Results = {}
        lip, argmax = expl.local_lipschitz_estimate(x, bound_type='box_std',
                                                    optim=args.optim,
                                                    eps=args.lip_eps,
                                                    n_calls=4 * args.lip_calls,
                                                    njobs=1,
                                                    verbose=2)
        # pdb.set_trace()
        Results['lip_argmax'] = (x, argmax, lip)
        # .reshape(inputs.shape[0], inputs.shape[1], -1)
        att = expl(x, None, show_plot=False)  # .squeeze()
        # .reshape(inputs.shape[0], inputs.shape[1], -1)
        att_argmax = expl(argmax, None, show_plot=False)  # .squeeze()
        # pdb.set_trace()
        Argmax_dict = {'lip': lip, 'argmax': argmax, 'x': x}
        fpath = os.path.join(results_path, 'argmax_lip_gp_senn.pdf')
        if args.h_type == 'input':
            lipschitz_argmax_plot(x_raw, revert_to_raw(argmax), att, att_argmax, lip, save_path=fpath)
        pickle.dump(Argmax_dict, open(
            results_path + '/argmax_lip_gp_senn.pkl', "wb"))
        pdb.set_trace()
        # print(asd.asd)

    # noise_stability_plots(model, test_tds, cuda = args.cuda, save_path = results_path)

    ### 3. Local lipschitz estimate over multiple samples with Black Box Optim
    do_bb_stability = False
    if do_bb_stability:
        print('**** Performing black-box lipschitz estimation over subset of dataset ****')
        maxpoints = 20
        # valid_loader is shuffled, so this is like a random choice
        mini_test = next(iter(valid_loader))[0][:maxpoints].numpy()
        lips = expl.estimate_dataset_lipschitz(mini_test,
                                               n_jobs=-1, bound_type='box_std',
                                               eps=args.lip_eps, optim=args.optim,
                                               n_calls=args.lip_calls, verbose=2)
        pdb.set_trace()
        Stability_dict = {'lips': lips}
        pickle.dump(Stability_dict, open(results_path + '_stability_blackbox.pkl', "wb"))
        All_Results['stability_blackbox'] = lips

    # add concept plot
    concept_grid(model, test_loader, top_k=10, save_path=results_path + '/concept_grid.pdf')
    pickle.dump(All_Results, open(results_path + '_combined_metrics.pkl', "wb"))
    # args.epoch_stats = epoch_stats
    # save_path = args.results_path
    # print("Save train/dev results to", save_path)
    # args_dict = vars(args)
    # pickle.dump(args_dict, open(save_path,'wb') )

if __name__ == '__main__':...
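One detail worth isolating from the script above: revert_to_raw undoes the Normalize((0.1307,), (0.3081,)) transform applied in load_mnist_data, so attributions can be plotted over raw pixel values. A self-contained round-trip check (the random tensor stands in for a real MNIST image):

# Minimal sketch of the revert_to_raw round trip; constants copied from
# load_mnist_data above, the input tensor is a stand-in for a [0, 1] image.
import torch
from torchvision import transforms

norm = transforms.Normalize((0.1307,), (0.3081,))

def revert_to_raw(t):
    # inverse of Normalize: x * std + mean
    return (t * .3081) + .1307

raw = torch.rand(1, 28, 28)              # fake single-channel 28x28 image
restored = revert_to_raw(norm(raw))
assert torch.allclose(raw, restored, atol=1e-6)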

tests.py

Source: tests.py (GitHub)

from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import six
from .models import (
    TestModel,
    TestNullableModel,
    TestDefaultModel,
    DEFAULT_DURATION
)
from durationfield.utils import timestring
from datetime import timedelta
import unittest


class DurationFieldTests(TestCase):
    def setUp(self):
        self.test_tds = [
            timedelta(hours=1),  # No days, no micro, single-digit hour
            timedelta(hours=10),  # double-digit hour
            timedelta(hours=10, minutes=35, seconds=1),
            timedelta(days=1),  # Day, no micro
            timedelta(days=1, microseconds=1),  # Day, with micro
            timedelta(days=10),  # Days, no micro
            timedelta(days=10, microseconds=1),  # Days, with micro
        ]
        return super(DurationFieldTests, self).setUp()

    def _delta_to_microseconds(self, td):
        """
        Get the total number of microseconds in a timedelta, normalizing days and
        seconds to microseconds.
        """
        SECONDS_TO_US = 1000 * 1000
        MINUTES_TO_US = SECONDS_TO_US * 60
        HOURS_TO_US = MINUTES_TO_US * 60
        DAYS_TO_US = HOURS_TO_US * 24
        td_in_ms = td.days * DAYS_TO_US + td.seconds * SECONDS_TO_US + td.microseconds
        self.assertEqual(timedelta(microseconds=td_in_ms), td)
        return td_in_ms

    def testTimedeltaStrRoundtrip(self):
        for td in self.test_tds:
            td_str = str(td)
            td_from_str = timestring.str_to_timedelta(td_str)
            self.assertEqual(td_from_str, td)

    def testTimedeltaStrInvalid(self):
        self.assertRaises(ValidationError, timestring.str_to_timedelta, 'fake')

    def testDbRoundTrip(self):
        """
        Data should remain the same when taking a round trip to and from the db
        """
        models = [TestModel, TestNullableModel, TestDefaultModel]
        for td in self.test_tds:
            for ModelClass in models:
                tm = ModelClass()
                tm.duration_field = td
                tm.save()
                tm_saved = ModelClass.objects.get(pk=tm.pk)
                self.assertEqual(tm_saved.duration_field, tm.duration_field)

    def testInvalidSaveAttempt(self):
        self.assertRaises(ValidationError, TestModel, duration_field='invalid')
        # not saved in DB
        self.assertEqual(TestModel.objects.count(), 0)

    def testDefaultValue(self):
        """
        Default value should be empty and fetchable
        """
        model_test = TestNullableModel()
        model_test.save()
        model_test_saved = TestNullableModel.objects.get(pk=model_test.pk)
        self.assertEqual(model_test.duration_field, None)
        self.assertEqual(model_test_saved.duration_field, None)

    def testDefaultGiven(self):
        """
        Default value should use the default argument
        """
        model_test = TestDefaultModel()
        model_test.save()
        model_test_saved = TestDefaultModel.objects.get(pk=model_test.pk)
        self.assertEqual(model_test.duration_field, DEFAULT_DURATION)
        self.assertEqual(model_test_saved.duration_field, DEFAULT_DURATION)

    def testApplicationType(self):
        """
        Timedeltas should be returned to the application
        """
        for td in self.test_tds:
            model_test = TestModel()
            model_test.duration_field = td
            model_test.save()
            model_test = TestModel.objects.get(pk=model_test.pk)
            self.assertEqual(td, model_test.duration_field)
            # Test with strings
            model_test = TestModel()
            model_test.duration_field = str(td)
            model_test.save()
            model_test = TestModel.objects.get(pk=model_test.pk)
            self.assertEqual(td, model_test.duration_field)
            # Test with int
            model_test = TestModel()
            model_test.duration_field = self._delta_to_microseconds(td)
            model_test.save()
            model_test = TestModel.objects.get(pk=model_test.pk)
            self.assertEqual(td, model_test.duration_field)

    @unittest.skipIf(six.PY3, 'long not present in Python 3')
    def testLongInPython2(self):
        for td in self.test_tds:
            # Test with long
            model_test = TestModel()
            model_test.duration_field = long(self._delta_to_microseconds(td))
            model_test.save()
            model_test = TestModel.objects.get(pk=model_test.pk)
            self.assertEqual(td, model_test.duration_field)

    def testInputTime(self):
        delta = timestring.str_to_timedelta("10:23")
        seconds = (10 * 60 * 60) + (23 * 60)
        self.assertEqual(seconds, delta.seconds)

    def testInputTimeSeconds(self):
        delta = timestring.str_to_timedelta("12:21:24")
        seconds = (12 * 60 * 60) + (21 * 60) + 24
        self.assertEqual(seconds, delta.seconds)

    def testInputTimeSecondsMicroseconds(self):
        delta = timestring.str_to_timedelta("11:20:22.000098")
        seconds = (11 * 60 * 60) + (20 * 60) + 22
        self.assertEqual(seconds, delta.seconds)
        self.assertEqual(98, delta.microseconds)

    def testInputTimeMicrosecondsRightPadZeros(self):
        delta = timestring.str_to_timedelta("11:20:22.160")
        self.assertEqual(160000, delta.microseconds)

    def testInputTimeMicrosecondsLeftPadZeros(self):
        delta = timestring.str_to_timedelta("11:20:22.016")
        self.assertEqual(16000, delta.microseconds)

    def testInputTimeMicrosecondsBothPadZeros(self):
        delta = timestring.str_to_timedelta("11:20:22.0160")
        self.assertEqual(16000, delta.microseconds)

    def testInputAll(self):
        delta = timestring.str_to_timedelta("1 year, 10 months, 3 weeks, 2 days, 3:40:50")
        days = (
            (1 * 365) +
            (10 * 30) +
            (3 * 7) +
            2
        )
        seconds = (
            (3 * 60 * 60) +
            (40 * 60) +
            50
        )
        self.assertEqual(
            days, delta.days
        )
        self.assertEqual(
            seconds, delta.seconds
        )

    def testInputAllAbbreviated(self):
        delta = timestring.str_to_timedelta("2y 9m 1w 20d 0:10:39")
        days = (
            (2 * 365) +
            (9 * 30) +
            (1 * 7) +
            20
        )
        seconds = (
            (0 * 60 * 60) +
            (10 * 60) +
            39
        )
        self.assertEqual(
            days, delta.days
        )
        self.assertEqual(
            seconds, delta.seconds
        )

    def testInputDaysOnly(self):
        delta = timestring.str_to_timedelta("24 days")
        self.assertEqual(
            24, delta.days...
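In this test suite, test_tds is the fixture list built in setUp, and _delta_to_microseconds carries the core arithmetic: Python's timedelta normalizes everything to (days, seconds, microseconds), so flattening to a single integer only needs those three fields. A standalone version with the same constants:

# Standalone sketch of the _delta_to_microseconds logic above.
from datetime import timedelta

US_PER_SECOND = 1000 * 1000
US_PER_MINUTE = US_PER_SECOND * 60
US_PER_HOUR = US_PER_MINUTE * 60
US_PER_DAY = US_PER_HOUR * 24

def delta_to_microseconds(td):
    # timedelta stores only days, seconds, and microseconds internally
    return td.days * US_PER_DAY + td.seconds * US_PER_SECOND + td.microseconds

td = timedelta(days=10, hours=3, minutes=35, microseconds=1)
assert timedelta(microseconds=delta_to_microseconds(td)) == td  # exact round trip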

