How to use the run_group method in autotest

Best Python code snippet using autotest_python
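
Note that in both snippets below, run_group is just a local identifier (an argparse argument group in the first, a (failure_point, values) tuple in the second); neither actually calls the autotest framework. If what you are after is autotest's own job.run_group, the sketch below shows roughly how it tends to be used from a client control file. This is an assumption based on the classic autotest client job API (job.run_group(function, **dargs) wrapping the call in a START/END group in the status log), so verify the exact signature against your autotest version.

# Hypothetical autotest client control file (sketch, not verified).
# `job` is provided by the autotest runner; it is not importable standalone.
# Assumption: job.run_group(function, **dargs) runs `function` inside a
# named group in the status log; job.run_test runs a client test.

def sleep_twice():
    job.run_test('sleeptest', seconds=1)
    job.run_test('sleeptest', seconds=2)

# Group the two sleeptest runs under a single entry in the results.
job.run_group(sleep_twice)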

opt_finetune.py

Source: opt_finetune.py (GitHub)

# Argparse configuration helpers for a SemEval fine-tuning script.
import argparse
import logging
import numpy as np
import random
import torch

logger = logging.getLogger("SemEval")

def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

def bool_flag(v):
    if v.lower() in {"on", "true", "yes", "t", "y", "1"}:
        return True
    elif v.lower() in {"off", "false", "no", "f", "n", "0"}:
        return False
    else:
        raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")

def set_args(parser, additional=False, **kwargs):
    path_config(parser)
    run_config(parser)
    ### add by func
    add_func = kwargs.get("add_func", None)
    if add_func is not None:
        for each in add_func:
            logger.info(f'Args add: [{each}]')
            eval(each)(parser)
    args = parser.parse_args()
    return args

def path_config(parser):
    path_group = parser.add_argument_group("Path information and required dirs")
    path_group.add_argument("--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .csv files (or other data files) for the task.")
    path_group.add_argument("--train_file", default=None, type=str)
    path_group.add_argument("--dev_file", default=None, type=str)
    path_group.add_argument("--test_file", default=None, type=str)
    path_group.add_argument("--use_newd", default=False, type=bool_flag)
    path_group.add_argument("--split_dev", default=False, type=bool_flag)
    path_group.add_argument("--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written.")
    path_group.add_argument("--log_file", default="log.out", type=str)
    path_group.add_argument("--tfboard_log_dir", default="event.out", type=str)
    path_group.add_argument("--result_eval_file", default="result.eval.txt", type=str)
    path_group.add_argument("--result_test_file", default="result.test.txt", type=str)
    path_group.add_argument("--result_trial_file", default="result.trial.txt", type=str)
    path_group.add_argument("--model_type", default="bert", type=str)
    path_group.add_argument("--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
             "bert-base-multilingual-cased, bert-base-chinese.")
    path_group.add_argument("--tokenizer_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
             "bert-base-multilingual-cased, bert-base-chinese.")
    path_group.add_argument("--config_name",
        default=None,
        type=str,
        help="Pretrained config name or path if not the same as model_name")
    path_group.add_argument('--overwrite_output_dir', action='store_true',
        help="Overwrite the content of the output directory")
    path_group.add_argument('--overwrite_cache', action='store_true',
        help="Overwrite the cached input feature sets.")

def run_config(parser):
    run_group = parser.add_argument_group("Run configs")
    run_group.add_argument("--task_name", default='wic_pair',
        type=str,
        required=True)
    run_group.add_argument("--network_name", default='basic',
        type=str)
    ### Run parameters
    run_group.add_argument("--max_seq_length", default=128,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization.\n"
             "Sequences longer than this will be truncated, and sequences shorter\n"
             "than this will be padded.")
    run_group.add_argument("--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    run_group.add_argument("--cls_segment_id", default=0,
        type=int)
    run_group.add_argument("--from_tf", action='store_true')
    ### Run Mode
    run_group.add_argument("--do_train",
        action='store_true',
        help="Whether to run training.")
    run_group.add_argument("--do_eval",
        action='store_true',
        help="Whether to run eval on the dev set.")
    run_group.add_argument("--do_test",
        action='store_true',
        help="Whether to run test on the test set.")
    run_group.add_argument("--do_trial",
        action='store_true',
        help="Whether to run test on the unofficial dev set.")
    run_group.add_argument("--have_test_label",
        action='store_true',
        help="Used when testing")
    ### Train parameters
    run_group.add_argument("--train_batch_size",
        default=32,
        type=int,
        help="Total batch size for training.\n"
             "Discarded.")
    run_group.add_argument("--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.")
    run_group.add_argument("--eval_batch_size",
        default=8,
        type=int,
        help="Total batch size for eval.\n"
             "Discarded.")
    run_group.add_argument("--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.")
    run_group.add_argument("--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for Adam.")
    run_group.add_argument("--adam_epsilon",
        # default=1e-8,
        default=1e-6,
        type=float,
        help="Epsilon for Adam optimizer.")
    run_group.add_argument("--num_train_epochs",
        default=3.0,
        type=float,
        help="Total number of training epochs to perform.")
    run_group.add_argument("--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    run_group.add_argument("--warmup_proportion",
        default=0.1,
        type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
             "E.g., 0.1 = 10%% of training.")
    run_group.add_argument("--warmup_steps",
        default=0, type=int,
        help="Linear warmup over warmup_steps.")
    run_group.add_argument("--weight_decay",
        # default=0.0,
        default=0.01,
        type=float,
        help="Weight decay if we apply some.")
    run_group.add_argument('--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.")
    run_group.add_argument("--max_grad_norm",
        default=1.0,  # default is 1.0
        type=float,
        help="Max gradient norm.")
    run_group.add_argument('--loss_scale',
        type=float, default=0,
        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
             "0 (default value): dynamic loss scaling.\n"
             "Positive power of 2: static loss scaling value.\n")
    run_group.add_argument('--lm_coef', type=float, default=0.9,
        help="parameter to balance lm loss and task loss for GPT/GPT2")
    run_group.add_argument('--add_loss_coef', type=float, default=1.0,
        help="parameter to balance main loss and additional loss for Task")
    ### Environment
    run_group.add_argument("--no_cuda",
        action='store_true',
        help="Whether not to use CUDA when available")
    run_group.add_argument("--local_rank",
        default=-1,
        type=int,
        help="local_rank for distributed training on gpus")
    run_group.add_argument('--seed',
        default=42,
        type=int,
        help="random seed for initialization")
    run_group.add_argument('--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    run_group.add_argument('--fp16_opt_level',
        type=str, default='O1',
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html")
    ### Others
    run_group.add_argument('--logging_steps', type=int, default=100,
        help="Log every X update steps.")
    run_group.add_argument('--save_steps', type=int, default=0,
        help="Save checkpoint every X update steps.")
    run_group.add_argument("--eval_all_checkpoints", action='store_true',
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number")
    run_group.add_argument("--evaluate_during_training", action='store_true',
        help="Run evaluation during training at each logging step.")
    run_group.add_argument("--evaluate_epoch", action='store_true',
        help="Run evaluation during training at each logging step.")
    ### Task specific
    run_group.add_argument("--output_mode",
        default="classification",
        type=str)
    run_group.add_argument("--num_choices", default=2,
        type=int)
    run_group.add_argument("--have_passage",
        action='store_true',
        help="if the example has a context passage")

def add_args(now_args, additional_args):
    now_args, additional_args = vars(now_args), vars(additional_args)
    for k, v in additional_args.items():
        if k not in now_args:
            now_args[k] = v
            logger.info("Update additional config {}: {}".format(k, v))
        else:
            if v != now_args[k]:
                logger.info("Warn: additional config {}: {}/{} exist.".format(k, now_args[k], v))
    return argparse.Namespace(**now_args)

def check_args_version(load_args, now_args):
    load_args, now_args = vars(load_args), vars(now_args)
    for k, v in now_args.items():
        if k not in load_args:
            load_args[k] = v
            logger.info("Update load checkpoint config {}: {}".format(k, v))
    return argparse.Namespace(**load_args)

def override_args(old_args, new_args):
    KEEP_CONFIG = {}
    old_args, new_args = vars(old_args), vars(new_args)
    for k in old_args.keys():
        if k in new_args and old_args[k] != new_args[k]:
            if k in KEEP_CONFIG:
                logger.info("Overriding saved {}: {} --> {}".format(k, old_args[k], new_args[k]))
                old_args[k] = new_args[k]
            else:
                logger.info("Keeping saved {}: {}".format(k, old_args[k]))
    return argparse.Namespace(**old_args)

def task_lm_finetune(parser):
    lm_task_group = parser.add_argument_group('Task configs: lm fine-tune')
    ### for lm finetuning
    lm_task_group.add_argument("--mlm",
        action='store_true',
        help="Train with masked-language modeling loss instead of language modeling.")
    lm_task_group.add_argument("--mlm_probability",
        type=float,
        default=0.15,
        help="Ratio of tokens to mask for masked language modeling loss")
    lm_task_group.add_argument("--block_size",
        default=-1,
        type=int,
        help="Optional input sequence length after tokenization. "
             "The training dataset will be truncated in blocks of this size for training. "
             "Default to the model max input length for single sentence inputs "
             "(take into account special tokens).")
    lm_task_group.add_argument('--save_total_limit',
        type=int,
        default=None,
        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, '
             'does not delete by default')
    lm_task_group.add_argument("--cache_dir", default="", type=str,
        help="Optional directory to store the pre-trained models downloaded from s3 "...
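
In opt_finetune.py, run_group is simply the argparse argument group returned by parser.add_argument_group("Run configs"). Grouping only changes how options are organized in the --help output, not how they are parsed. A minimal, self-contained sketch of the same pattern (the option names are illustrative):

import argparse

def run_config(parser):
    # Options added to the group are still regular arguments; the group
    # only controls how they are listed under "Run configs" in --help.
    run_group = parser.add_argument_group("Run configs")
    run_group.add_argument("--learning_rate", default=5e-5, type=float)
    run_group.add_argument("--seed", default=42, type=int)

parser = argparse.ArgumentParser()
run_config(parser)
args = parser.parse_args(["--learning_rate", "3e-5"])
print(args.learning_rate, args.seed)  # -> 3e-05 42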

plot_network_strain.py

Source: plot_network_strain.py (GitHub)

import argparse
from copy import deepcopy
import pandas as pd
import h5py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pylab as pylab

params = {'axes.titlesize': 'x-large',
          'axes.labelsize': 'x-large'}
pylab.rcParams.update(params)

DIRECTORY = 'final/plots/'  # TODO change this as needed

def plot_failures(failure_points, lowest, highest):
    print(len(failure_points))
    plt.figure()
    plt.xlim(0, 18.5)
    plt.hist(failure_points, bins=np.arange(0.5, 19.5, 1), cumulative=True, color='orange', edgecolor='k')
    plt.xticks(np.arange(1, 19))
    plt.ylabel('% Networks Failed')
    plt.xlabel('Total Task Count')
    plt.savefig('{}failures.pdf'.format(DIRECTORY), dpi=300, format='pdf')

def plot_strain(run_groups, metric):
    plt.figure()
    for run_group in run_groups:
        plt.plot(run_group[1], label=run_group[0])
    plt.legend(loc='upper left', fancybox=True, shadow=True)
    # if metric == 'Final Training Iteration Loss':
    #     plt.ylim(0, 10)
    plt.ylabel(metric)
    plt.xlabel('Task')
    plt.savefig('{}{}.pdf'.format(DIRECTORY, metric), dpi=300, format='pdf')

def parse_h5(filename):
    f = h5py.File(filename, 'r')
    failure = f['failure'][0]
    total = []
    st_dev = []
    avg = []
    maximum = []
    loss = []
    fisher_information = []
    for data in f['fisher_total']:
        total.append(data)
    for data in f['post_training_loss']:
        loss.append(data)
    for data in f['fisher_average']:
        avg.append(data)
    for data in f['fisher_st_dev']:
        st_dev.append(data)
    for data in f['fisher_max']:
        maximum.append(data)
    # for data in f['fisher_information']:
    #     fisher_information.append([])
    #     for task in data:
    #         fisher_information[len(fisher_information) - 1].append(task)
    f.close()
    return (failure, total), (failure, st_dev), (failure, avg), (failure, maximum), (failure, loss)  # , (failure, fisher_information)

def plot_fisher_dist(run_group):
    plt.figure()
    tasks = []
    for fi_data in run_group[1]:
        tasks.append(fi_data)
    # bins = np.arange(0, 2.5, 0.01)
    plt.hist(tasks, bins=[15, 16, 17, 18], label=np.arange(0, run_group[0] + 1))
    plt.xticks([15, 16, 17, 18])
    # for i, task in enumerate(tasks):
    #     sns.distplot(task)
    plt.legend(loc='upper right', fancybox=True, shadow=True)
    plt.savefig('{}fisher_distribution_failure_at_{}.eps'.format(DIRECTORY, run_group[0]), dpi=300, format='eps')

def main():
    # sns.set(color_codes=True)
    parser = argparse.ArgumentParser(description='Plotting Tool')
    parser.add_argument('--filenames',
        nargs='+', type=str, default=['NONE'], metavar='FILENAMES',
        help='names of .h5 files containing experimental result data')
    args = parser.parse_args()
    runs = []
    for filename in args.filenames:
        runs.append([])
        # total, st_dev, avg, maximum, loss, fisher_information = parse_h5(filename)
        total, st_dev, avg, maximum, loss = parse_h5(filename)
        runs[len(runs) - 1].append(total)
        runs[len(runs) - 1].append(st_dev)
        runs[len(runs) - 1].append(avg)
        runs[len(runs) - 1].append(maximum)
        runs[len(runs) - 1].append(loss)
        # runs[len(runs) - 1].append(fisher_information)
    failure_points = []
    for data in runs:
        failure_points.append(data[0][0])
    highest = np.amax(failure_points)
    lowest = np.amin(failure_points)
    plot_failures(failure_points, lowest, highest)
    # metrics = ['Sum of Fisher Information', 'Standard Deviation of Fisher Information', 'Average of Fisher Information',
    #            'Maximum Fisher Information Value', 'Final Training Iteration Loss']
    # for strain_metric in range(5):
    #     # strain_per_task is an array organized like so:
    #     # [
    #     #     [0, []]              row 0
    #     #     [c1, [x1, x2, x3]]   row 1: [# of runs failed at task 1,
    #     #                          average network strain per task (index) for runs ending at task 1]
    #     #     [c2, [y1, y2, y3]]   row 2: [# of runs failed at task 2,
    #     #                          average network strain per task (index) for runs ending at task 2]
    #     #     ...
    #     # ]
    #     strain_per_task = []
    #     for i in np.arange(0, highest + 1):
    #         strain_per_task.append([0, np.zeros(i)])
    #     for data in runs:
    #         for t in range(len(data[strain_metric][1])):
    #             strain_per_task[data[strain_metric][0]][1][t] += data[strain_metric][1][t]
    #         strain_per_task[data[strain_metric][0]][0] += 1
    #     for row in range(len(strain_per_task)):
    #         for i in range(len(strain_per_task[row][1])):
    #             if strain_per_task[row][0] != 0:
    #                 strain_per_task[row][1][i] /= strain_per_task[row][0]
    #     print(strain_per_task)
    #     run_groups = []
    #     for row in range(len(strain_per_task)):
    #         if strain_per_task[row][0] > 0:
    #             run_groups.append((row, strain_per_task[row][1]))
    #     print(run_groups)
    #     metric = metrics[strain_metric]
    #     plot_strain(run_groups, metric)
    # plot summed fisher info distribution
    # fisher_summed = []
    #
    # print(len(runs[0][5][1][1]))
    #
    # for i in np.arange(0, highest + 1):
    #     fisher_summed.append([0, np.zeros((i, len(runs[0][5][1][1])))])
    #
    # for data in runs:
    #     for t in range(len(data[5][1])):
    #         for fi in range(len(data[5][1][t])):
    #             fisher_summed[data[5][0]][1][t][fi] += data[5][1][t][fi]
    #     fisher_summed[data[5][0]][0] += 1
    #
    # # average fisher summed info for each task for each run group
    # for row in range(len(fisher_summed)):
    #     for task in range(len(fisher_summed[row][1])):
    #         for fisher_info in range(len(fisher_summed[row][1][task])):
    #             if fisher_summed[row][0] != 0:
    #                 fisher_summed[row][1][task][fisher_info] /= fisher_summed[row][0]
    #
    # run_groups = []
    # for row in range(len(fisher_summed)):
    #     if fisher_summed[row][0] > 0:
    #         run_groups.append((row, fisher_summed[row][1]))
    #
    # print(run_groups)
    #
    # # run groups is now [...(failure_point, [[fisher info task 0][fi t1][fi t2]...])...]
    # for group in run_groups:
    #     plot_fisher_dist(group)

if __name__ == '__main__':...
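
In plot_network_strain.py, each run_group is a (failure_point, per-task metric values) tuple, and plot_strain draws one line per tuple. A small usage sketch with synthetic data and a trimmed copy of plot_strain (assuming matplotlib and numpy are installed; the Agg backend keeps it headless):

import numpy as np
import matplotlib
matplotlib.use('Agg')  # render off-screen; no display needed
import matplotlib.pyplot as plt

def plot_strain(run_groups, metric):
    # Each run_group is (failure_point, per-task metric values).
    plt.figure()
    for run_group in run_groups:
        plt.plot(run_group[1], label=run_group[0])
    plt.legend(loc='upper left')
    plt.ylabel(metric)
    plt.xlabel('Task')
    plt.savefig('{}.pdf'.format(metric), format='pdf')

# Two synthetic runs that failed at task 3 and task 5, respectively.
run_groups = [(3, np.array([0.9, 1.4, 2.1])),
              (5, np.array([0.8, 1.1, 1.5, 1.9, 2.6]))]
plot_strain(run_groups, 'Sum of Fisher Information')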

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
