How to use the test_stuff method in hypothesis

Best Python code snippets using hypothesis
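Note: test_stuff is not part of the hypothesis API itself; in the snippets collected below it is simply the name of a user-defined test function or variable. For orientation, a minimal property-based test written with hypothesis looks like the following sketch. The property being checked (reversing a list twice is the identity) is an illustrative assumption, not taken from the snippets below.

from hypothesis import given, strategies as st

# A user-defined property-based test; hypothesis generates the inputs
# and shrinks any failing example to a minimal counterexample.
@given(st.lists(st.integers()))
def test_stuff(xs):
    # Property: reversing a list twice returns the original list.
    assert list(reversed(list(reversed(xs)))) == xs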

make_experiment.py

Source: make_experiment.py (GitHub)



import argparse
from myutils.formatter import PrettyFormatter

parser = argparse.ArgumentParser(formatter_class=PrettyFormatter)
#
parser.add_argument('--output_core', type=str, default='experiments/def_result',
                    help='Core part of the output files (in the form: output_core_appendix)')
#
parser.add_argument('--splits_checkpoint', type=str, default=None,
                    help=('Checkpoint containing indices of train/test splits. '
                          'If present, will override splits generation and try to read from this file.'))
#
parser.add_argument('--data_checkpoint', type=str, default=None,
                    help=('Checkpoint containing preprocessed dataset. '
                          'If present, will override preprocessing step and try to read from this file.'))
#
parser.add_argument('--scale_positive', type=float, default=0.5,
                    help=('Scale assigned to positive class. Value of 0.5 means that both negative and'
                          ' positive classes are treated equally.'))
#
parser.add_argument('--config', type=str, default='config_files/def_rdkit_config.yaml',
                    help='YAML file defining the experiment configuration (see README)')
#
parser.add_argument('--force', type=str, choices=['all', 'splits'], default=None,
                    help=('Forces data preprocess/splits calculation despite checkpoint being present.\n'
                          '* all - force both data preprocess and splits generation\n'
                          '* splits - force only splits generation\n'))
#
parser.add_argument('--epochs', type=int, default=100,
                    help='Number of training epochs')
#
parser.add_argument('--validation_mode', type=str, choices=['5cv_test', 'val_test'],
                    default='val_test', help=('Type of validation procedure. Validation score is '
                          'used for model selection, which is then tested on the held-out test set.\n'
                          '* val_test - train on 72%% of data, validate on 18%% of data and test on 10%% of data\n'
                          '* 5cv_test - do 5-fold cross-validation on 90%% of data, test on 10%% of data\n'))
#
args = parser.parse_args()
# name checkpoints of this run (if a checkpoint file is not provided)
if args.data_checkpoint is None:
    args.data_checkpoint = '%s_data_chk.pkz' % args.output_core
if args.splits_checkpoint is None:
    args.splits_checkpoint = '%s_splits_chk.pkz' % args.output_core
# set override flags; preprocessing and splits will be done,
# results will be written in checkpoints
args.force_resample = False
args.force_resplit = False
if args.force == 'all':
    args.force_resample = True
    args.force_resplit = True
elif args.force == 'splits':
    args.force_resplit = True
# ===== These imports take time, so they are placed here.
#       Thanks to that, printing the help is quick. =====
from data_preprocess import load_yaml, load_from_config, yaml, gz_pickle, gz_unpickle, tpr_tnr
from data_preprocess import balance_masked_weights, split_data, isfile, np, slice_data, make_test_data, scale_weights
from models import make_model, make_av_std, make_bayesian_prediction, SaveSelected, tasks_balanced_scores
import logging
# ========= logging stuff ===========
logger = logging.getLogger()
level = logging.INFO
formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
log_name = '%s_%s.log' % (args.output_core, args.validation_mode)
handlers = logging.FileHandler(log_name), logging.StreamHandler()
for handler in handlers:
    handler.setFormatter(formatter)
    handler.setLevel(level)
    logger.addHandler(handler)
logger.setLevel(level)
# ======= loading stuff ==========
logger.info('Start')
config = load_yaml(args.config)
config['loader_config']['data_checkpoint'] = args.data_checkpoint
x, y, weights = load_from_config(config, args.force_resample)
# ======= split data =======
checkpoint_name = args.splits_checkpoint
cv_cfg = config['cross_validation']
cv_cfg['checkpoint'] = checkpoint_name
cv_splits, train_idx, test_idx, weights = split_data(y, weights, cv_cfg,
                                                     args.validation_mode, args.force_resplit)
data_stuff = [x, y, weights]
kind = config['model_params'].get('kind', 'any')
is_multitask = 'multitask' in kind
if is_multitask:
    axis = 1
    N_outputs = len(y)
else:
    axis = 0
    N_outputs = -1
to_relax = config['model_params'].get('relax', False)
# ======= print info =======
config_str = yaml.dump(config, default_flow_style=False)
logger.info('Config file: %s' % args.config)
logger.info('Force resample/reload: %s Force resplit: %s' % (args.force_resample, args.force_resplit))
logger.info('weights_are_none %s' % (weights is None))
logger.info('Validation mode: %s' % args.validation_mode)
logger.info('output_core: %s' % args.output_core)
logger.info('Epochs: %i' % args.epochs)
logger.info('Positive scaling: %8.3f' % args.scale_positive)
logger.info('Data loaded.\n== Config ==\n%s\n============' % config_str)
data_summary = [str(obj) for obj in [np.shape(y), np.sum(y, axis=axis)]]
logger.info('Y shape:%s Y sum:%s' % tuple(data_summary))
# write all configuration to the YAML file
with open('%s_post.yaml' % args.output_core, 'w') as f:
    f.write(config_str)
# ====== train =======
batch_size = 100  # TODO: add to config later?
epochs = args.epochs
history = []
N_inputs = config['model_params'].get('num_inputs', 1)
if N_inputs == 1:
    input_shape = x.shape[1]
else:
    input_shape = [arr.shape for arr in x]
data_shapes = config.get('data_shapes', 'none')
if data_shapes != 'none':
    input_shape = data_shapes
for cv_train_idx, cv_val_idx in cv_splits:
    train = slice_data(data_stuff, cv_train_idx, N_inputs)
    val = slice_data(data_stuff, cv_val_idx, N_inputs)
    if is_multitask:
        logger.info('Train Y: %s' % str(np.sum(train[1], axis=1)))
        for i, vy in enumerate(train[1]):
            key = 'out%i' % i
            train[2][key] = balance_masked_weights(vy, train[2][key])
            val[2][key] = balance_masked_weights(val[1][i], val[2][key])
    else:
        train[2] = scale_weights(train[1], train[2], args.scale_positive)
        val[2] = scale_weights(val[1], val[2], args.scale_positive)
        logger.info('Train Y: %s' % str(np.sum(train[1], axis=0)))
    model, metric = make_model(input_shape, np.shape(y), config['model_params'])
    logger.info('Model built')
    result = model.fit(train[0], train[1], sample_weight=train[2],
                       validation_data=val,
                       batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1)
    if to_relax:
        logger.info('Relaxing')
        tr_score, _ = make_av_std([result.history], metric, is_multitask, N_outputs)
        tst_score, _ = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
        logger.info('Last stats: Train: %8.3f Val: %8.3f' % (tr_score.max(), tst_score.max()))
        w = model.get_weights()
        model, _ = make_model(input_shape, np.shape(y), config['model_params'])
        model.set_weights(w)
        for layer in model.layers:
            layer.trainable = True
        result = model.fit(train[0], train[1], sample_weight=train[2],
                           validation_data=val[:2],
                           batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1)
        tr_score, _ = make_av_std([result.history], metric, is_multitask, N_outputs)
        tst_score, _ = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
        logger.info('After relaxation: Train: %8.3f Val: %8.3f' % (tr_score.max(), tst_score.max()))
    history.append(result.history)
# ====== Average ===============
if is_multitask:
    metric = 'out%i_' + metric
cv_av, cv_std = make_av_std(history, metric, is_multitask, N_outputs)
cv_val_av, cv_val_std = make_av_std(history, 'val_%s' % metric, is_multitask, N_outputs)
val_loss = np.array([x['val_loss'] for x in history]).mean(axis=0)
if is_multitask:
    best_cv_idx = np.argmax(val_loss)  # cv_val_av.argmax()
else:
    best_cv_idx = cv_val_av.argmax()
logging.info('Val loss: %i-%8.6f' % (best_cv_idx, val_loss[best_cv_idx]))
test_epochs = args.epochs  # best_cv_idx+1
# ====== test ===========
logger.info('Testing')
test_stuff = make_test_data(data_stuff, test_idx, N_inputs, config)  # slice_data(data_stuff, test_idx, N_inputs)
train_stuff = slice_data(data_stuff, train_idx, N_inputs)
test_stuff = list(test_stuff)
train_stuff = list(train_stuff)
# TODO: repetition - refactor
if is_multitask:
    for i, vy in enumerate(train_stuff[1]):
        key = 'out%i' % i
        train_stuff[2][key] = balance_masked_weights(vy, train_stuff[2][key])
        test_stuff[2][key] = balance_masked_weights(test_stuff[1][i], test_stuff[2][key])
else:
    train_stuff[2] = scale_weights(train_stuff[1], train_stuff[2], args.scale_positive)
    test_stuff[2] = scale_weights(test_stuff[1], test_stuff[2], args.scale_positive)
model, _ = make_model(input_shape, np.shape(y), config['model_params'])
logger.info('Model built')
saver = SaveSelected(best_cv_idx)
try:
    result = model.fit(train_stuff[0], train_stuff[1], sample_weight=train_stuff[2],
                       validation_data=test_stuff,
                       batch_size=batch_size, epochs=test_epochs, shuffle=True, verbose=1, callbacks=[saver])
    if to_relax:
        logger.info('Relaxing')
        tr_score, _ = make_av_std([result.history], metric, is_multitask, N_outputs)
        tst_score, _ = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
        logger.info('Last stats: Train: %8.3f Val: %8.3f' % (tr_score.max(), tst_score.max()))
        for layer in model.layers:
            layer.trainable = True
        result = model.fit(train_stuff[0], train_stuff[1], sample_weight=train_stuff[2],
                           validation_data=test_stuff[:2],
                           batch_size=batch_size, epochs=test_epochs, shuffle=True, verbose=1, callbacks=[saver])
        tr_score, _ = make_av_std([result.history], metric, is_multitask, N_outputs)
        tst_score, _ = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
        logger.info('After relaxation: Train: %8.3f Val: %8.3f' % (tr_score.max(), tst_score.max()))
except Exception:
    # fallback: rebuild validation_data as a tuple with the x part as a list
    xx = list(test_stuff[0])
    yy = test_stuff[1]
    result = model.fit(train_stuff[0], train_stuff[1], sample_weight=train_stuff[2],
                       validation_data=(xx, yy, test_stuff[2]),
                       batch_size=batch_size, epochs=test_epochs, shuffle=True, verbose=1, callbacks=[saver])
    if to_relax:
        logger.info('Relaxing')
        tr_score, _ = make_av_std([result.history], metric, is_multitask, N_outputs)
        tst_score, _ = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
        logger.info('Last stats: Train: %8.3f Val: %8.3f' % (tr_score.max(), tst_score.max()))
        for layer in model.layers:
            layer.trainable = True
        result = model.fit(train_stuff[0], train_stuff[1], sample_weight=train_stuff[2],
                           validation_data=(xx, yy),
                           batch_size=batch_size, epochs=test_epochs, shuffle=True, verbose=1, callbacks=[saver])
# ====== save history =====
logger.info('Saving history...')
test_av, test_std = make_av_std([result.history], metric, is_multitask, N_outputs)
test_val_av, test_val_std = make_av_std([result.history], 'val_%s' % metric, is_multitask, N_outputs)
logger.info('Best CV:')
logger.info('epoch: %i' % (best_cv_idx + 1))
cv_max_data = tuple(arr[best_cv_idx] for arr in [cv_av, cv_std, cv_val_av, cv_val_std])
logger.info('train: %8.3f (%4.3f) val:%8.3f (%4.3f)' % cv_max_data)
logger.info('Test:')
test_max_data = tuple(arr[best_cv_idx] for arr in [test_av, test_val_av])
logger.info('train: %8.3f val:%8.3f' % test_max_data)
logger.info('Resetting model weights from epoch with best cross-val score')
saver.reset()
if is_multitask:
    logger.info('Details at best epoch:')
    for i in range(N_outputs):
        out_trn = result.history[metric % i][best_cv_idx]
        out_tst = result.history['val_' + metric % i][best_cv_idx]
        logger.info(' out%-2i: train: %8.3f test %8.3f' % (i, out_trn, out_tst))
else:
    if isinstance(test_stuff[0], tuple):
        x_to_use = list(test_stuff[0])
    else:
        x_to_use = test_stuff[0]
    pred = model.predict(x_to_use)
    tpr, tnr = tpr_tnr(pred, test_stuff[1])
    logger.info('TPR: %8.3f TNR: %8.3f' % (tpr, tnr))
with open('%s_%s_history.txt' % (args.output_core, args.validation_mode), 'w') as f:
    data_order = [cv_av, cv_std, cv_val_av, cv_val_std, test_av, test_val_av]
    titles = ('EPOCH', 'train_cv', 'train_cv_err', 'val_cv', 'val_cv_err',
              'test', 'test_val')
    line_format = '%5i %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f\n'
    f.write('%5s %12s %12s %12s %12s %12s %12s\n' % titles)
    for i in range(args.epochs):
        to_write = [i + 1] + [arr[i] for arr in data_order]
        f.write(line_format % tuple(to_write))
if is_multitask:
    prediction_train = model.predict(train_stuff[0])
    prediction_test = model.predict(test_stuff[0])
    score_train = tasks_balanced_scores(prediction_train, train_stuff[1], train_stuff[2])
    score_test = tasks_balanced_scores(prediction_test, test_stuff[1], test_stuff[2])
    auc_test = tasks_balanced_scores(prediction_test, test_stuff[1], test_stuff[2], auc=True)
    logger.info('Correcting for masking information')
    logger.info('Train mean: %8.3f Test mean: %8.3f Test mean auc %8.3f' % (score_train.mean(), score_test.mean(), np.mean(auc_test)))
    for i in range(N_outputs):
        logger.info(' out%-2i: train: %8.3f test %8.3f auc: %8.3f' % (i, score_train[i], score_test[i], auc_test[i]))
logger.info('Saving model weights')
gz_pickle(args.output_core + '_train_weights.pkz', model.get_weights())
# ======= bayesian prediction =======
do_bayes = config['model_params'].get('dropout_flag', False)
if do_bayes:
    logger.info('Making bayesian prediction on %i samples' % test_stuff[1].shape[0])
    bayesian_result = make_bayesian_prediction(model, test_stuff[0])
    logger.info('Done. Saving...')
    with open('%s_%s_bayesian_result.txt' % (args.output_core, args.validation_mode), 'w') as f:
        order = ('true_flag', 'mean_prob', 'aleatoric', 'epistemic')
        f.write('%10s %10s %10s %10s \n' % order)
        for i in range(test_stuff[1].shape[0]):
            f.write('%10i ' % test_stuff[1][i].argmax())
            for key in order[1:]:
                f.write('%10.4f ' % bayesian_result[key][i])
            f.write('\n')
...
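The snippet above partitions data_stuff by integer index arrays (train_idx, test_idx and the cross-validation splits). A hypothesis property test can check the basic invariant any such splitter must satisfy: every sample lands in exactly one partition. This is a minimal sketch; simple_split below is a hypothetical stand-in for the project's split_data, whose real signature is shown above.

import numpy as np
from hypothesis import given, strategies as st

def simple_split(n, test_fraction=0.1, seed=0):
    # Hypothetical splitter: shuffle the indices, cut off a test chunk.
    rng = np.random.RandomState(seed)
    idx = rng.permutation(n)
    n_test = max(1, int(n * test_fraction))
    return idx[n_test:], idx[:n_test]  # train_idx, test_idx

@given(st.integers(min_value=2, max_value=500))
def test_stuff(n):
    train_idx, test_idx = simple_split(n)
    # Partitions are disjoint and jointly cover all n indices.
    assert len(np.intersect1d(train_idx, test_idx)) == 0
    combined = np.concatenate([train_idx, test_idx])
    assert sorted(combined.tolist()) == list(range(n))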


drops.py

Source: drops.py (GitHub)



1"""2System for managing dropped items, dropped knowledge, distributed NPCs.3For allowing map generators to create things that might appear elsewhere in the4world, thus allowing the kind of interconnectedness that the game would5otherwise lack. This is the focus of drops here, rather than the economic6distribution of in-game resources.7"""8import random9common = 1010uncommon = 511rare = 112super_rare = 0.113class DropRegister:14 """15 Tracks droppable items.16 """17 def __init__(self):18 self.drops = []19 self.total_weight = 020 def add(self, generator, rarity, limit = None):21 self.drops.append((generator, rarity, limit))22 self.total_weight += rarity23 def gen_drop(self):24 index = random.random()*self.total_weight25 current_weight = 026 for generator, weight, limit in self.drops:27 current_weight += weight28 if current_weight > index:29 if limit != None:30 self.drops.remove((generator, weight, limit))31 if limit > 1:32 self.drops.append((generator, weight, limit-1))33 else:34 self.total_weight -= weight35 if callable(generator):36 return generator()37 else:38 return generator39loot = DropRegister()40keys = DropRegister()41def test_drops():42 test_stuff = DropRegister()43 test_stuff.add(lambda: "a")44 assert test_stuff.total_weight == common45 test_stuff.add(lambda: "b")46 assert test_stuff.total_weight == common*247 test_stuff.add(lambda: "c", limit = 1) 48 results = [test_stuff.gen_drop() for i in range(100)]49 for result in results:50 assert result in ["a", "b", "c"]51 assert results.count("c") <= 152 if results.count("c") == 1:...


test_import.py

Source: test_import.py (GitHub)



# NOTE: this snippet is Python 2 code (print statements, old- vs new-style classes)
from testmod import test_stuff
from testmod import test_fxns

s = "alice killed bob"
result = test_stuff.countWords(s, "alice")
print result
print(result)  # same output in Python 2: the parentheses are just grouping
o = test_stuff.OldStyleClass("alice")
print o
n = test_stuff.NewStyleClass("bob")
print n
l = []
for i in range(5):
    l.append(test_stuff.OldStyleClass(i))
print l
for i in l:
    print i
print test_fxns.A_GLOBAL
test_fxns.incr_global()
...
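The testmod package itself is not shown on this page. A hypothetical test_stuff module consistent with the calls above might look like the following sketch (Python 2, to match the snippet); the real module may well differ.

# Hypothetical testmod/test_stuff.py -- reconstructed for illustration only.

def countWords(text, word):
    # count whole-word occurrences of `word` in `text`
    return text.split().count(word)

class OldStyleClass:  # Python 2 old-style class (does not inherit from object)
    def __init__(self, name):
        self.name = name

class NewStyleClass(object):  # Python 2 new-style class
    def __init__(self, name):
        self.name = name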


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test, through best practices, to advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

