How to use log_dir method in Slash

Best Python code snippets using slash
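None of the snippets below actually call a Slash API; each shows a common way Python projects build and use a log_dir: a CLI argument feeding a spreadsheet report (Run_save_experiment.py), an auto-incremented run directory (main.py), a directory name that encodes hyperparameters (train_nn.py), and a set of experiment-folder helpers (utils.py). In the Slash testing framework itself, the log directory is driven by configuration rather than by a log_dir method. The sketch below is a minimal, unverified example: the log.root and log.subpath config keys and the slashconf.py loading behavior are assumptions taken from the Slash docs, so check them against your installed Slash version.

# slashconf.py -- Slash picks this file up from the project root.
# Hedged sketch: the two config keys below are assumptions to verify
# against your Slash version's documentation.
import slash

# Root directory under which Slash writes per-session and per-test logs.
slash.config.root.log.root = "logs/slash"
# Layout of the log files beneath that root, expanded from the run context.
slash.config.root.log.subpath = "{context.session.id}/{context.test_id}/debug.log"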

Run_save_experiment.py

Source: Run_save_experiment.py (GitHub)


import sys
sys.path.append('..')
from src import ML
from src import config
from src.Standard import SpectralAverage
import os
from tqdm import tqdm
import argparse
from datetime import datetime
import numpy as np
import xlsxwriter
import csv


def main():
    parser = argparse.ArgumentParser(
        description="Options for saving experiments to an .xlsx sheet")
    # parser.add_argument('data_type',
    #                     type=str,
    #                     help="Input data type: contigs, erps, or spectra")
    parser.add_argument('--log_dirs',
                        dest='log_dirs',
                        nargs='+',
                        default=["logs/fit"],
                        help="(Default: ['logs/fit']) Parent directory for "
                        + "checkpoints.")
    parser.add_argument('--checkpoint_dirs',
                        dest='checkpoint_dirs',
                        nargs='+',
                        default=None,
                        help="(Default: None) Checkpoint directory (most "
                        + "likely found in logs/fit) containing saved model.")

    # save the variables in 'args'
    args = parser.parse_args()
    # data_type = args.data_type
    log_dirs = args.log_dirs
    checkpoint_dirs = args.checkpoint_dirs

    # learning rates swept by the experiments (log-spaced from 1e-05 to 0.001)
    lrs = [1e-05,
           1.2742749857031348e-05,
           1.623776739188721e-05,
           2.06913808111479e-05,
           2.6366508987303556e-05,
           3.359818286283781e-05,
           4.281332398719396e-05,
           5.4555947811685143e-05,
           6.951927961775606e-05,
           8.858667904100833e-05,
           0.00011288378916846884,
           0.0001438449888287663,
           0.00018329807108324357,
           0.00023357214690901214,
           0.00029763514416313193,
           0.000379269019073225,
           0.0004832930238571752,
           0.0006158482110660261,
           0.0007847599703514606,
           0.001]

    # # ERROR HANDLING
    # if data_type not in ["erps", "spectra", "contigs"]:
    #     print(
    #         "Invalid entry for data_type. "
    #         + "Must be one of ['erps', 'contigs', 'spectra']")
    #     raise ValueError
    #     sys.exit(3)
    #
    # for log_dir in log_dirs:
    #
    #     if checkpoint_dirs is not None:
    #         for checkpoint_dir in checkpoint_dirs:
    #             if not os.path.isdir(checkpoint_dir):
    #                 if not os.path.isdir(log_dir+checkpoint_dir):
    #                     print(
    #                         "Invalid entry for checkpoint directory, "
    #                         + "path does not exist as directory.")
    #                     raise FileNotFoundError
    #                     sys.exit(3)
    #                 else:
    #                     checkpoint_dir = log_dir+checkpoint_dir
    #
    #     if checkpoint_dir is None:
    #         checkpoint_dirs = [
    #             log_dir + folder
    #             for folder in os.listdir(log_dir)
    #             if "_"+data_type in folder]
    #     else:
    #         checkpoint_dirs = [checkpoint_dir]

    workbook = xlsxwriter.Workbook('Experiments.xlsx')

    for z, log_dir in enumerate(log_dirs):
        # if checkpoint_dirs == None:
        checkpoint_dirs = os.listdir(log_dir)
        checkpoint_dirs.sort()

        worksheet = workbook.add_worksheet(str(z))
        # worksheet.set_default_row(200)
        # worksheet.set_column(6, 21, 200)

        # headers
        # model config / training
        worksheet.write('A1', 'Training Studies')
        worksheet.write('B1', 'Learning Rate')
        worksheet.write('C1', 'Final Accuracy')
        worksheet.write('D1', 'Final Loss')
        worksheet.write('E1', 'Final Validation Accuracy')
        worksheet.write('F1', 'Final Validation Loss')
        # training image headers
        worksheet.write('G1', 'Training / Validation Accuracy')
        worksheet.write('H1', 'Training / Validation Loss')
        worksheet.write('I1', 'ROC Curve')
        # testing image headers
        worksheet.write('J1', 'Confusion Matrix')
        worksheet.write('K1', 'Output Nodes (3D)')
        # worksheet.write('J1', 'Eval CU_pain')
        # worksheet.write('K1', 'Eval CU_control')
        # worksheet.write('L1', 'Eval lyons_pain (old)')
        # worksheet.write('M1', 'Eval lyons_pain (new)')
        # worksheet.write('N1', 'Eval glynn_pain')
        # worksheet.write('O1', 'Eval rehab')
        # worksheet.write('P1', 'Eval ref 24-30')
        # worksheet.write('Q1', 'Eval ref 31-40')
        # worksheet.write('R1', 'Eval ref 41-50')
        # worksheet.write('S1', 'Eval ref 51-60')
        # worksheet.write('T1', 'Eval ref 61-70')
        # worksheet.write('U1', 'Eval ref 71-80')
        # worksheet.write('V1', 'Eval ref 81+')

        for i, checkpoint_dir in enumerate(checkpoint_dirs):
            # read only the last line of this run's CSV training log
            with open(log_dir+"/"+checkpoint_dir+"/training.log", 'r') as f:
                for line in f:
                    pass
                last_line = line.strip()
            training = last_line.split(',')
            training = [val.strip() for val in training]
            # if training[0] != '19':
            #     continue
            if len(training) != 5:
                continue

            worksheet.write(i+1, 0, 'WD control vs. CU pain vs. Rehab')
            worksheet.write(i+1, 1, lrs[i//5])
            # write the final TF log metrics to columns C-F
            worksheet.write(i+1, 2, training[1])
            worksheet.write(i+1, 3, training[2])
            worksheet.write(i+1, 4, training[3])
            worksheet.write(i+1, 5, training[4])
            # write training images
            worksheet.insert_image(i+1, 6, log_dir+"/"+checkpoint_dir+"/epoch_accuracy.png", {'y_scale': 0.35, 'x_scale': 0.7, 'object_position': 1})
            worksheet.insert_image(i+1, 7, log_dir+"/"+checkpoint_dir+"/epoch_loss.png", {'y_scale': 0.35, 'x_scale': 0.7, 'object_position': 1})
            # worksheet.insert_image(i+1, 8, "logs/"+log_dir+"/"+checkpoint_dir+"/ROC.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # write testing images
            worksheet.insert_image(i+1, 9, log_dir+"/"+checkpoint_dir+"/confusion_matrix.png", {'y_scale': 0.5, 'x_scale': 0.7, 'object_position': 1})
            # worksheet.insert_image(i+1, 10, log_dir+"/"+checkpoint_dir+"/validation_3d_preds.gif", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 9, "logs/"+log_dir+"/"+checkpoint_dir+"/CU_pain.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 10, "logs/"+log_dir+"/"+checkpoint_dir+"/CU_control.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 11, "logs/"+log_dir+"/"+checkpoint_dir+"/lyons_pain.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 12, "logs/"+log_dir+"/"+checkpoint_dir+"/lyons_pain_2.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 13, "logs/"+log_dir+"/"+checkpoint_dir+"/glynn_pain.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 14, "logs/"+log_dir+"/"+checkpoint_dir+"/rehab.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 15, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 24-30.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 16, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 31-40.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 17, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 41-50.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 18, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 51-60.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 19, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 61-70.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 20, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 71-80.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            # worksheet.insert_image(i+1, 21, "logs/"+log_dir+"/"+checkpoint_dir+"/ref 81+.png", {'y_scale': 0.75, 'x_scale': 0.75, 'object_position': 1})
            worksheet.set_row(i+1, 250)
        worksheet.set_column(6, 21, 80)

    workbook.close()


if __name__ == '__main__':
    main()


main.py

Source: main.py (GitHub)


import argparse
import json
import os
import gym
import gym_boxworld
from stable_baselines import A2C, ACKTR, ACER
from stable_baselines.common.policies import CnnPolicy, LstmPolicy
from relational_policies import RelationalPolicy, RelationalLstmPolicy  # custom Policy
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common import set_global_seeds
from stable_baselines.bench import Monitor
from stable_baselines.common.atari_wrappers import FrameStack
from warehouse_env.warehouse_env import WarehouseEnv
import numpy as np


def saveInLearn(log_dir):
    # interval between saved checkpoints, in timesteps
    unit_time = int(1e5)

    def callback(_locals, _globals):
        num_timesteps = _locals['self'].num_timesteps
        if num_timesteps >= 1 * unit_time and num_timesteps % unit_time == 0:
            _locals['self'].save(log_dir + 'model_{}.zip'.format(num_timesteps))
        return True
    return callback


def make_env(env_id, env_level, rank, log_dir, frame_stack=False, useMonitor=True,
             seed=0, map_file=None, render_as_observation=False,
             exponential_agent_training_curve=False):
    def _init():
        if env_id == "WarehouseEnv":
            # if map_file is "None" or map_file is None:
            simple_agent = np.zeros((11, 11))
            simple_agent[5, 5] = 1
            # [[ 0, 1, 0, 0, 0, 0, 2, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 3, 0, 0, 0, 0]]
            # simple_agent = \
            #     [[ 0, 1, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #      [ 0, 0, 0, 0, 0, 0, 0, 0, 0]]
            simple_world = np.zeros((11, 11))
            # [[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 1, 0, 0, 0, 0],
            #  [ 0, 1, 0, 0, 0, 1, 0, 0, 0],
            #  [ 0, 0, 0, 0, 1, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0],
            #  [ 0, 0, 0, 0, 0, 0, 0, 0, 0]]
            env = WarehouseEnv(agent_map=simple_agent, obstacle_map=simple_world,
                               render_as_observation=render_as_observation,
                               exponential_agent_training_curve=exponential_agent_training_curve)
        else:
            env = gym.make(env_id, level=env_level)
        if frame_stack:
            env = FrameStack(env, 4)
        if useMonitor:
            env = Monitor(env, log_dir + str(rank), allow_early_resets=True)
        return env
    set_global_seeds(seed)
    return _init


def set_logdir(config):
    log_dir = '{}/{}_{}_{}/log_0/'.format(config.log_dir, config.env_name, config.model_name, config.policy_name)
    # if log_dir already exists, auto-increment the trailing order number
    while os.path.exists(log_dir):
        lastdir_name = log_dir.split('/')[-2]
        order = int(lastdir_name.split('_')[-1])
        log_dir = log_dir.replace('_{}'.format(order), '_{}'.format(order + 1))
    os.makedirs(log_dir)
    with open(log_dir + 'config.txt', 'wt') as f:
        json.dump(config.__dict__, f, indent=2)
    print(("--------------------------Created dir: {} successfully!--------------------------\n").format(log_dir))
    return log_dir


def set_env(config, log_dir):
    env_id = "WarehouseEnv" if config.env_name == "WarehouseEnv" else config.env_name + 'NoFrameskip-v4'
    env = SubprocVecEnv([make_env(env_id, config.env_level, i, log_dir, frame_stack=config.frame_stack, map_file=config.map_file,
                                  render_as_observation=config.render_as_observation) for i in range(config.num_cpu)])
    return env


def set_model(config, env, log_dir):
    if config.timeline:
        from timeline_util import _train_step
        A2C.log_dir = log_dir
        A2C._train_step = _train_step
    policy = {'CnnPolicy': CnnPolicy, 'LstmPolicy': LstmPolicy, 'RelationalPolicy': RelationalPolicy, 'RelationalLstmPolicy': RelationalLstmPolicy}
    base_mode = {'A2C': A2C, "ACKTR": ACKTR, "ACER": ACER}
    # whether to reduce the observation
    policy[config.policy_name].reduce_obs = config.reduce_obs
    n_steps = config.env_steps
    policy_kwargs = dict(feature_extraction=(config.render_as_observation))
    model = base_mode[config.model_name](policy[config.policy_name], env, verbose=1,
                                         tensorboard_log=log_dir,
                                         n_steps=n_steps, policy_kwargs=policy_kwargs)
    # priming_steps=config.priming_steps,
    # coordinated_planner=config.coordinated_planner)
    print(("--------Algorithm:{} with {} num_cpu:{} total_timesteps:{} starting to train!--------\n")
          .format(config.model_name, config.policy_name, config.num_cpu, config.total_timesteps))
    return model


def run(config):
    log_dir = set_logdir(config)
    env = set_env(config, log_dir)
    model = set_model(config, env, log_dir)
    model.learn(total_timesteps=int(config.total_timesteps), callback=saveInLearn(log_dir) if config.save else None)
    if config.save:
        model.save(log_dir + 'model.zip')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("env_name", choices=['BoxRandWorld', 'BoxWorld', "WarehouseEnv"],
                        help="Name of environment")
    parser.add_argument("-env_level", choices=['easy', 'medium', 'hard'], default='easy',
                        help="level of environment")
    parser.add_argument("-map_file", default='None', type=str, help="Map file")
    parser.add_argument("policy_name", choices=['RelationalPolicy', 'CnnPolicy', 'RelationalLstmPolicy', 'LstmPolicy'],
                        help="Name of policy")
    parser.add_argument("-model_name", choices=['A2C', 'ACER', 'ACKTR'],
                        default='A2C', help="Name of model")
    parser.add_argument("-reduce_obs", action='store_true')
    parser.add_argument("-timeline", action='store_true', help='performance analysis, default=False')
    parser.add_argument("-frame_stack", action='store_true', help='whether to use frame_stack, default=False')
    parser.add_argument("-cuda_device", default='1', help='which cuda device to run, default="1"')
    parser.add_argument("-num_cpu", default=4, type=int, help='number of CPUs')
    parser.add_argument("-total_timesteps", default=2e6, type=float, help='total train timesteps, default=2e6')
    parser.add_argument("-log_dir", default='exp_result', help='log_dir path, default="exp_result"')
    parser.add_argument("-save", action='store_true', help='whether to save model to log_dir, default=False')
    parser.add_argument("-env_steps", default=50, type=int, help='num steps, default="50"')
    parser.add_argument("-priming_steps", default=1000, type=int, help='priming steps, default="1000"')
    parser.add_argument("-coordinated_planner", action='store_true', help='whether to use a coordinated_planner, default=False')
    parser.add_argument("-render_as_observation", action='store_true', help='whether to use render_as_observation, default=False')
    parser.add_argument("-delta_tolling", action='store_true', help='whether to use delta_tolling, default=False')
    parser.add_argument("-random_agent_reset_location", action='store_true', help='whether to use random_agent_reset_location, default=False')
    parser.add_argument("-exponential_agent_training_curve", action='store_true', help='whether to use an exponential_agent_training_curve, default=False')
    config = parser.parse_args()
    # print(config)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.cuda_device
    run(config)
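For log_dir purposes the interesting piece above is set_logdir(): it derives <log_dir>/<env>_<model>_<policy>/log_<n>/ and bumps n until the path is free, so repeated runs of the same configuration never overwrite each other (saveInLearn() then checkpoints into that directory every 1e5 timesteps). Here is a standalone sketch of the same pattern; the base path is illustrative, not taken from the source.

import os

def next_run_dir(base="exp_result/BoxWorld_A2C_RelationalPolicy"):
    # Find the first unused log_<n>/ under base and create it.
    order = 0
    log_dir = "{}/log_{}/".format(base, order)
    while os.path.exists(log_dir):
        order += 1
        log_dir = "{}/log_{}/".format(base, order)
    os.makedirs(log_dir)
    return log_dir

Recomputing the path from a counter, instead of str.replace on '_<n>' as the source does, sidesteps the corner case where the substring '_<n>' also appears earlier in the path (for example in the experiment name) and gets rewritten too.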


train_nn.py

Source: train_nn.py (GitHub)


...
max_epochs = 80
batch_size = 128


def main():
    log_dir = LOG_DIR
    log_dir = init_log_dir(log_dir)
    data = spio.loadmat(DATA, squeeze_me=True)
    x_train = data['datasetInputs'][0]
    y_train = data['datasetTargets'][0]
    x_val = data['datasetInputs'][1]
    y_val = data['datasetTargets'][1]
    x_test = data['datasetInputs'][2]
    y_test = data['datasetTargets'][2]
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    x_val = np.array(x_val)
    y_val = np.array(y_val)

    model = Sequential()
    model.add(Dense(layers[0], input_dim=900, activation=activation_fns[0]))
    model.add(Dropout(dropouts[0]))
    for i in range(1, len(layers)):
        model.add(Dropout(dropouts[i]))
        model.add(Dense(layers[i], kernel_regularizer=regularizer, activation=activation_fns[i]))
    model.add(Dense(7, activation='softmax'))

    print("[INFO] compiling model...")
    sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_lr, nesterov=nesterov)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd, metrics=['accuracy'])
    early_stop_callback = keras.callbacks.EarlyStopping(monitor='val_acc',
                                                        min_delta=early_stop_min_delta,
                                                        patience=patience, verbose=1, mode='max')
    append_to_callbacks(early_stop_callback)
    if lr_scheduler_fn:
        learning_rate_scheduler = keras.callbacks.LearningRateScheduler(
            lr_scheduler_fn, verbose=0)
        append_to_callbacks(learning_rate_scheduler)
        log_dir = append_params_to_log_dir(log_dir, ['lr_scheduler', lr_scheduler_fn.__name__])
        log_dir = append_params_to_log_dir(log_dir, ['lr_param', lr_param])
    else:
        log_dir = append_params_to_log_dir(log_dir, ['lr_scheduler', lr_scheduler_fn])
    log_dir = check_unique_log_dir(log_dir)
    if tensorboard:
        tensorboard_cb = TensorBoard(log_dir=log_dir, histogram_freq=0,
                                     write_graph=True, write_images=True)
        append_to_callbacks(tensorboard_cb)

    print("-------------------------------------------------------------------")
    print("Log Directory: " + log_dir)
    print("-------------------------------------------------------------------")
    if normalize_imgs:
        print("[INFO] Normalizing Images..")
        x_train = normalize_input(x_train)
        x_val = normalize_input(x_val)
        x_test = normalize_input(x_test)
    model.fit(x_train, y_train, epochs=max_epochs, batch_size=batch_size,
              validation_data=(x_val, y_val), callbacks=callbacks)

    # show the accuracy on the testing set
    print("[INFO] evaluating on testing set...")
    (loss, accuracy) = model.evaluate(x_test, y_test,
                                      batch_size=batch_size, verbose=1)
    print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
    y_pred = model.predict(x_test, batch_size=batch_size, verbose=1)
    y_pred = y_pred.argmax(1)
    y_true = y_test.argmax(1)
    conf_matrix = confusion_matrix(y_true, y_pred)
    f1_measure = f1_score(y_true, y_pred, average='weighted')
    class_report = classification_report(y_true, y_pred)
    print("F1 measure: " + str(f1_measure))
    print("------------------------------")
    print("Classification report: ")
    print(str(class_report))
    print("------------------------------")
    print_confusion_matrix(conf_matrix, True)
    print_confusion_matrix(conf_matrix, False)
    print('Saving Model..')
    save_model(model)


def save_model(model):
    # Creates an HDF5 file
    model.save('facial_recognition_model' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + '.h5')


def normalize_input(x):
    pool = multiprocessing.Pool(8)
    for i in range(len(x)):
        x[i] = pool.map(scale, [x[i]])[0]
    return x


def append_params_to_log_dir(log_dir, params):
    log_dir += str.join('_', [str(i) for i in params])
    log_dir += '_'
    return log_dir


def init_log_dir(log_dir):
    log_dir = append_params_to_log_dir(log_dir, activation_fns + layers)
    log_dir = append_params_to_log_dir(log_dir, ['lr', learning_rate])
    log_dir = append_params_to_log_dir(log_dir, ['dr', decay_lr])
    log_dir = append_params_to_log_dir(log_dir, ['m', momentum])
    log_dir = append_params_to_log_dir(log_dir, ['nest', nesterov])
    log_dir = append_params_to_log_dir(log_dir, ['patience', patience])
    log_dir = append_params_to_log_dir(log_dir, ['dropouts'] + dropouts)
    log_dir = append_params_to_log_dir(log_dir, ['reg', regularizer_type])
    return log_dir


def append_to_callbacks(callback):
    callbacks.append(callback)


def check_unique_log_dir(log_dir):
    if os.path.isdir(log_dir):
        print("Log Directory already exists! Appending date to log dir.")
        log_dir += datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    return log_dir


def print_confusion_matrix(cm, normalize_cm):
    if normalize_cm:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion matrix, without normalization")
    print(cm)


def scale(img):
    img = image_histogram_equalization(img)
    avg = np.average(img)
    ...
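train_nn.py's log_dir handling has two parts: init_log_dir() and append_params_to_log_dir() pack every hyperparameter into the directory name so each TensorBoard run is self-describing, and check_unique_log_dir() appends a timestamp when that name already exists. Below is a hedged sketch of the same idea with illustrative parameter names; it deliberately swaps the source's '%Y-%m-%d %H:%M' stamp for filesystem-safe separators, since spaces and colons are awkward in paths and illegal on Windows.

import datetime
import os

def build_log_dir(root, params):
    # Encode the hyperparameters in the directory name, sorted for stability.
    name = "_".join("{}-{}".format(k, v) for k, v in sorted(params.items()))
    log_dir = os.path.join(root, name)
    if os.path.isdir(log_dir):
        # Same settings already ran: disambiguate with a timestamp.
        log_dir += datetime.datetime.now().strftime("_%Y-%m-%d_%H-%M")
    return log_dir

# build_log_dir("logs", {"lr": 0.01, "m": 0.9, "patience": 5})
# -> "logs/lr-0.01_m-0.9_patience-5"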


utils.py

Source: utils.py (GitHub)


...
    composed = np.zeros((h, 2*w, ch))
    composed[:, :w, :] = ap
    composed[:, w:, :] = bp
    return composed


def get_log_dir(log_dir, expt_name):
    """Compose the log_dir with the experiment name."""
    if log_dir is None:
        raise Exception('log_dir can not be None.')
    if expt_name is not None:
        return os.path.join(log_dir, expt_name)
    return log_dir


def mkdir(mypath):
    """Create a directory if it does not exist."""
    try:
        os.makedirs(mypath)
    except OSError as exc:
        if exc.errno == EEXIST and os.path.isdir(mypath):
            pass
        else:
            raise


def create_expt_dir(params):
    """Create the experiment directory and return it."""
    expt_dir = get_log_dir(params.log_dir, params.expt_name)
    # Create directories if they do not exist
    mkdir(params.log_dir)
    mkdir(expt_dir)
    # Save the parameters
    with open(os.path.join(expt_dir, 'params.json'), 'w') as f:
        f.write(json.dumps(params, indent=4, sort_keys=True))
    return expt_dir


def plot_loss(loss, label, filename, log_dir):
    """Plot a loss function and save it in a file."""
    plt.figure(figsize=(5, 4))
    plt.plot(loss, label=label)
    plt.legend()
    plt.savefig(os.path.join(log_dir, filename))
    plt.clf()


def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
        is_a_binary=True, is_b_binary=False):
    """Log losses and atob results."""
    log_dir = get_log_dir(log_dir, expt_name)
    # Save the losses for further inspection
    pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
    ###########################################################################
    # PLOT THE LOSSES                                                         #
    ###########################################################################
    plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
    plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
    plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
    plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
    ###########################################################################
    # PLOT THE A->B RESULTS                                                   #
    ###########################################################################
    plt.figure(figsize=(12, 8))
    for i in range(N*N):
        a, _ = next(it_val)
        bp = atob.predict(a)
        img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
        plt.subplot(N, N, i+1)
        plt.imshow(img)
        plt.axis('off')
    plt.savefig(os.path.join(log_dir, 'atob.png'))
    plt.clf()
    # Make sure all the figures are closed.
    plt.close('all')


def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
    """Save the weights of the models into a file."""
    log_dir = get_log_dir(log_dir, expt_name)
    models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
    models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)


def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
    """Load the weights into the corresponding models."""
    log_dir = get_log_dir(log_dir, expt_name)
    atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
    d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))


def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
    """Load the weights of the model m."""
    log_dir = get_log_dir(log_dir, expt_name)
    m.load_weights(os.path.join(log_dir, weights_file))


def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
    """Load the losses of the given experiment."""
    log_dir = get_log_dir(log_dir, expt_name)
    losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
    return losses


def load_params(params):
    """
    Load the parameters of an experiment and return them.
    The params passed as argument will be merged with the new params dict.
    If there is a conflict with a key, the params passed as argument prevail.
    """
    expt_dir = get_log_dir(params.log_dir, params.expt_name)
    expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
    # Update the loaded parameters with the current parameters. This will
    # override conflicting keys as expected.
    expt_params.update(params)
    ...
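get_log_dir() is the single choke point here: every save/load helper recomputes os.path.join(log_dir, expt_name) through it, so one (log_dir, expt_name) pair consistently addresses the experiment's params, weights, losses, and plots. A usage sketch follows, assuming the helpers above are importable from utils.py; Params is a hypothetical stand-in for the attribute-style, JSON-serializable object create_expt_dir expects.

from utils import create_expt_dir, load_losses

class Params(dict):
    # A dict whose keys are also readable as attributes,
    # matching how create_expt_dir accesses params.log_dir etc.
    __getattr__ = dict.__getitem__

params = Params(log_dir="logs", expt_name="pix2pix_facades")
expt_dir = create_expt_dir(params)  # -> logs/pix2pix_facades, writes params.json
# After training has written losses.pkl, the same pair finds it again:
# losses = load_losses(log_dir="logs", expt_name="pix2pix_facades")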


