How to use disable_debug_info in Slash

Best Python code snippets using slash
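In the snippets below, disable_debug_info is not a slash API call: each script defines it as a plain module-level boolean and branches on it to suppress debug output. The CMake compiler plugin maps it to a -DCppCommon_NO_DEBUG_INFO define, while the two TensorFlow scripts use it to raise TF_CPP_MIN_LOG_LEVEL. The gating pattern in its smallest form looks like this (a minimal sketch; the logging setup is illustrative and not taken from the snippets):

import logging

disable_debug_info = True  # flip to silence debug-level output

# When the flag is set, raise the logging threshold so DEBUG records are dropped.
logging.basicConfig(level=logging.WARNING if disable_debug_info else logging.DEBUG)

log = logging.getLogger("example")
log.debug("verbose diagnostic - hidden when disable_debug_info is True")
log.warning("always shown")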

CMakeCompiler.py

Source: CMakeCompiler.py (GitHub)


# ----------------------------------------------------------------------
# |
# |  CMakeCompiler.py
# |
# |  David Brownell <db@DavidBrownell.com>
# |      2019-04-04 08:55:24
# |
# ----------------------------------------------------------------------
# |
# |  Copyright David Brownell 2019-22
# |  Distributed under the Boost Software License, Version 1.0. See
# |  accompanying file LICENSE_1_0.txt or copy at
# |  http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the Compiler object and entry point"""

import os
import re
import shutil
import sys
import textwrap

import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import CommandLine
from CommonEnvironment import FileSystem
from CommonEnvironment import Interface
from CommonEnvironment import Process
from CommonEnvironment.Shell.All import CurrentShell
from CommonEnvironment.StreamDecorator import StreamDecorator
from CommonEnvironment import StringHelpers

from CommonEnvironment.CompilerImpl import Compiler as CompilerMod
from CommonEnvironment.CompilerImpl.InputProcessingMixin.IndividualInputProcessingMixin import (
    IndividualInputProcessingMixin,
)
from CommonEnvironment.CompilerImpl.InvocationMixin.CommandLineInvocationMixin import (
    CommandLineInvocationMixin,
)
from CommonEnvironment.CompilerImpl.InvocationQueryMixin.ConditionalInvocationQueryMixin import (
    ConditionalInvocationQueryMixin,
)
from CommonEnvironment.CompilerImpl.OutputMixin.MultipleOutputMixin import (
    MultipleOutputMixin,
)

from CommonEnvironment.TypeInfo.FundamentalTypes.DirectoryTypeInfo import (
    DirectoryTypeInfo,
)

# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------

# ----------------------------------------------------------------------
@Interface.staticderived
class Compiler(
    IndividualInputProcessingMixin,
    CommandLineInvocationMixin,
    ConditionalInvocationQueryMixin,
    MultipleOutputMixin,
    CompilerMod.Compiler,
):
    """Compiles a CMAKE directory"""

    # ----------------------------------------------------------------------
    # |  Properties
    Name = Interface.DerivedProperty("CMake")
    Description = Interface.DerivedProperty("Compiles a CMake directory.")
    InputTypeInfo = Interface.DerivedProperty(DirectoryTypeInfo())

    # ----------------------------------------------------------------------
    # |  Methods
    @staticmethod
    @Interface.override
    def IsSupportedContent(filename):
        return os.path.isfile(os.path.join(filename, "CMakeLists.txt"))

    # ----------------------------------------------------------------------
    @staticmethod
    @Interface.override
    def IsSupportedTestItem(item):
        return item.endswith("Tests")

    # ----------------------------------------------------------------------
    @staticmethod
    @Interface.override
    def CreateInvokeCommandLine(context, verbose_stream):
        return 'cmake --build "{build}"'.format(
            build=context["output_dir"],
        )

    # ----------------------------------------------------------------------
    @staticmethod
    @Interface.override
    def RemoveTemporaryArtifacts(context):
        output_dir = context["output_dir"]

        # Move GCC-generated profile data to the output dir
        for filename in FileSystem.WalkFiles(
            output_dir,
            include_file_extensions=[".gcno", ".gcda"],
        ):
            dest_filename = os.path.join(output_dir, os.path.basename(filename))
            if dest_filename == filename:
                continue

            if not os.path.isfile(dest_filename):
                shutil.copyfile(filename, dest_filename)

        for potential_dir in ["CMakeFiles", "Testing"]:
            potential_dir = os.path.join(output_dir, potential_dir)
            FileSystem.RemoveTree(potential_dir)

        for potential_file in ["CMakeCache.txt", "cmake_install.cmake", "Makefile"]:
            potential_file = os.path.join(output_dir, potential_file)
            FileSystem.RemoveFile(potential_file)

        remove_extensions = set([".ilk"])

        for item in os.listdir(output_dir):
            if os.path.splitext(item)[1] not in remove_extensions:
                continue

            fullpath = os.path.join(output_dir, item)
            FileSystem.RemoveFile(fullpath)

    # ----------------------------------------------------------------------
    @staticmethod
    @Interface.override
    def ExecuteExclusively(context):
        # Don't allow the parallel execution of cmake files, as each of them
        # internally will compile on all available threads.
        return True

    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    @classmethod
    @Interface.override
    def _GetOptionalMetadata(cls):
        return [
            (
                "generator",
                None
                if os.getenv("DEVELOPMENT_ENVIRONMENT_CPP_USE_DEFAULT_CMAKE_GENERATOR")
                else "Ninja",
            ),
            ("is_debug", True),
            ("cmake_debug_output", False),
            ("use_unicode", False),
            (
                "static_crt",
                False if os.getenv("DEVELOPMENT_ENVIRONMENT_CPP_NO_STATIC_CRT") else True,
            ),
            ("is_profile", False),
            ("is_benchmark", False),
            ("disable_debug_info", False),
            ("disable_aslr", False),
        ] + super(Compiler, cls)._GetOptionalMetadata()

    # ----------------------------------------------------------------------
    @classmethod
    @Interface.override
    def _GetRequiredContextNames(cls):
        return ["output_dir"] + super(Compiler, cls)._GetRequiredContextNames()

    # ----------------------------------------------------------------------
    @classmethod
    @Interface.override
    def _CreateContext(cls, metadata, status_stream):
        if "output_dir" not in metadata:
            return metadata

        # Invoke cmake to get a list of the generated files. The best way that I have found
        # to do this is to parse generated dot files (as we don't want to get into the business
        # of parsing cmake files).
        temp_directory = CurrentShell.CreateTempDirectory()
        with CallOnExit(lambda: FileSystem.RemoveTree(temp_directory)):
            dot_filename = os.path.join(temp_directory, "generated.dot")

            command_line_options = [
                '-S "{}"'.format(metadata["input"]),
                '-B "{}"'.format(metadata["output_dir"]),
                '"--graphviz={}"'.format(dot_filename),
                "-DCMAKE_BUILD_TYPE={}".format(
                    "Debug" if metadata["is_debug"] else "Release",
                ),
                "-DCppCommon_CMAKE_DEBUG_OUTPUT={}".format(
                    "ON" if metadata["cmake_debug_output"] else "OFF",
                ),
                "-DCppCommon_UNICODE={}".format(
                    "ON" if metadata["use_unicode"] else "OFF",
                ),
                "-DCppCommon_STATIC_CRT={}".format(
                    "ON" if metadata["static_crt"] else "OFF",
                ),
                "-DCppCommon_CODE_COVERAGE={}".format(
                    "ON" if metadata["is_profile"] else "OFF",
                ),
                "-DCppCommon_NO_DEBUG_INFO={}".format(
                    "ON" if metadata["disable_debug_info"] else "OFF",
                ),
                "-DCppCommon_NO_ADDRESS_SPACE_LAYOUT_RANDOMIZATION={}".format(
                    "ON" if metadata["disable_aslr"] else "OFF",
                ),
            ]

            if metadata["generator"]:
                command_line_options.append('-G "{}"'.format(metadata["generator"]))

            result, output = Process.Execute(
                "cmake {}".format(" ".join(command_line_options)),
            )
            if result != 0:
                raise Exception(
                    textwrap.dedent(
                        """\
                        cmake failed ({}):
                            {}
                        """,
                    ).format(result, StringHelpers.LeftJustify(output, 4)),
                )

            # Parse the dot file. This regular expression has been configured to work
            # with dot files generated by:
            #
            #   - CMake 3.13.4
            #   - CMake 3.17.2
            #
            regex = re.compile(
                r"""(?#
                node                        )\"node\d+\"\s*(?#
                lbracket                    )\[\s*(?#
                label key                   )label\s*=\s*(?#
                name                        )\"(?P<name>.+?)\"(?#
                [optional] comma delimiter  ),?\s+(?#
                shape key                   )shape\s*=\s*(?#
                value                       )(?:\"house\"|egg)\s*(?#
                rbracket                    )\](?#
                terminator                  );(?#
                                            )""",
            )

            with open(dot_filename) as f:
                content = f.read()

            output_filenames = []

            for match in regex.finditer(content):
                output_filenames.append(
                    os.path.join(
                        metadata["output_dir"],
                        CurrentShell.CreateExecutableName(match.group("name")),
                    ),
                )

            metadata["output_filenames"] = output_filenames

        return super(Compiler, cls)._CreateContext(metadata, status_stream)


# ----------------------------------------------------------------------
@CommandLine.EntryPoint
@CommandLine.Constraints(
    output_dir=CommandLine.DirectoryTypeInfo(
        ensure_exists=False,
    ),
    input=CommandLine.DirectoryTypeInfo(
        arity="+",
    ),
    generator=CommandLine.StringTypeInfo(
        arity="?",
    ),
    output_stream=None,
)
def Compile(
    output_dir,
    input,
    generator=None,
    release=False,
    cmake_debug_output=False,
    unicode=False,
    no_static_crt=False,
    profile=False,
    benchmark=False,
    disable_debug_info=False,
    disable_aslr=False,
    output_stream=sys.stdout,
    verbose=False,
):
    """Compiles a CMake directory"""

    inputs = input
    del input

    return CompilerMod.CommandLineCompile(
        Compiler,
        inputs,
        StreamDecorator(output_stream),
        verbose,
        output_dir=output_dir,
        is_debug=not release,
        cmake_debug_output=cmake_debug_output,
        use_unicode=unicode,
        static_crt=not no_static_crt,
        is_profile=profile,
        is_benchmark=benchmark,
        disable_debug_info=disable_debug_info,
        disable_aslr=disable_aslr,
    )


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
    try:
        sys.exit(CommandLine.Main())
    except KeyboardInterrupt:
        ...
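In this compiler, disable_debug_info is optional metadata (it defaults to False in _GetOptionalMetadata) that _CreateContext translates into the CppCommon_NO_DEBUG_INFO CMake cache variable. A minimal sketch of just that translation, with the surrounding machinery stubbed out (build_cmake_flags is a hypothetical helper, not part of the module):

def build_cmake_flags(metadata):
    # Mirror the ON/OFF mapping performed in _CreateContext above.
    return [
        "-DCMAKE_BUILD_TYPE={}".format("Debug" if metadata["is_debug"] else "Release"),
        "-DCppCommon_NO_DEBUG_INFO={}".format("ON" if metadata["disable_debug_info"] else "OFF"),
        "-DCppCommon_NO_ADDRESS_SPACE_LAYOUT_RANDOMIZATION={}".format("ON" if metadata["disable_aslr"] else "OFF"),
    ]


# A debug build that still strips debug info (e.g. to shrink binaries):
print(build_cmake_flags({"is_debug": True, "disable_debug_info": True, "disable_aslr": False}))
# ['-DCMAKE_BUILD_TYPE=Debug', '-DCppCommon_NO_DEBUG_INFO=ON', '-DCppCommon_NO_ADDRESS_SPACE_LAYOUT_RANDOMIZATION=OFF']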


fine tuning.py

Source: fine tuning.py (GitHub)


import tensorflow as tf
import numpy as np
from images import load_images

X_train, y_train, X_test, y_test, X_val, y_val = load_images(shuffle_before=True)
input_shape = X_train.shape[1:]

disable_debug_info = False
if disable_debug_info:
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'


def new_design_va80(augment_data=False, tune_hypers=False, learning_rate=3.4e-4, batch_size=26, return_results=False):

    initializer = tf.keras.initializers.VarianceScaling(2)

    layers = [
        tf.keras.layers.Conv2D(64, 3, 1, 'same', input_shape=input_shape, kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(64, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(256, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(256, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(64, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(8, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),

        # tf.keras.layers.Conv2D(6, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        # In kernel_initializer, the 2 is removed so that the initialization returns to Xavier initialization instead of
        # Kaiming initialization. Xavier initialization should in theory work better since Kaiming initialization
        # is meant for ReLU, but the final activation is softmax. However, in practice, it does not make a difference.
        tf.keras.layers.Conv2D(6, 3, 1, 'same', kernel_initializer=tf.keras.initializers.VarianceScaling(), activation='relu'),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Activation('softmax')
    ]

    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)

    model = tf.keras.models.Sequential(layers)

    train_loss = []
    val_loss = []
    train_acc = []
    val_acc = []
    augmentor = tf.keras.preprocessing.image.ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
    optimizer = tf.keras.optimizers.RMSprop()

    if not tune_hypers:
        for fit in range(2):
            # The accuracies below are the result of training 7 models independently and averaging

            # 48x48 / 64x64
            # 48x48 images - 81.305933% accuracy
            # 64x64 images - 82.466024% accuracy
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=2.5e-4 * 10**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

            # 100x100
            # 100x100 images - 85.449123% accuracy
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=6e-4 * 10**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

            # 128x128
            # 128x128 images - 88.067615% accuracy
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=4.5e-4 * 10**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=2.5e-4 * 10**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=3e-4 * 5**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
            # the good one is below
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=2.5e-4 * 5**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
            optimizer.learning_rate = 1e-4 * 10**-fit

            model.compile(optimizer, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

            # 156x156
            # 156x156 images - 86.54293% accuracy
            # model.compile(tf.keras.optimizers.RMSprop(learning_rate=4.5e-4 * 10**-fit), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

            if not augment_data:
                # history = model.fit(X_train, y_train, 26, epochs=25, validation_data=(X_val, y_val), verbose=2, callbacks=[callback])
                history = model.fit(X_train, y_train, 26, epochs=16, validation_data=(X_val, y_val), verbose=2, callbacks=[callback])

                print()
                train_loss.extend(history.history['loss'])
                val_loss.extend(history.history['val_loss'])
                train_acc.extend(history.history['accuracy'])
                val_acc.extend(history.history['val_accuracy'])

            else:
                # Manual early stopping on the augmented data: stop once the
                # validation loss has not improved for `patience` epochs.
                highest_val_loss = np.inf
                count = 0
                patience = 3
                num_epochs = 18
                model.compile(optimizer, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
                for epoch in range(num_epochs):
                    print(f'Epoch: {epoch+1}/{num_epochs}')
                    data = augmentor.flow(X_train, y_train, batch_size=20)
                    # fit_generator is deprecated in newer TF releases; model.fit accepts generators too.
                    history = model.fit_generator(data, validation_data=(X_val, y_val), epochs=1, verbose=2)
                    train_loss.extend(history.history['loss'])
                    val_loss.extend(history.history['val_loss'])
                    current_val_loss = history.history['val_loss'][-1]
                    train_acc.extend(history.history['accuracy'])
                    current_val_acc = history.history['val_accuracy'][-1]
                    val_acc.append(current_val_acc)
                    count += 1
                    if highest_val_loss - current_val_loss > 0:
                        count = 0
                        highest_val_loss = current_val_loss
                    if count == patience:
                        break

                    # test = input('break?')
                    # if test == 'yes':
                    #     break

        print()

        test_results = model.evaluate(X_test, y_test)
        print(input_shape)

        if return_results:
            return test_results[1]

    else:
        # compile() takes an optimizer, not a learning_rate kwarg.
        model.compile(tf.keras.optimizers.RMSprop(learning_rate=learning_rate), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
        history = model.fit(X_train, y_train, batch_size, epochs=12, validation_data=(X_val, y_val), verbose=0)
        train_acc = model.evaluate(X_train, y_train, verbose=0)[1]
        val_acc = model.evaluate(X_val, y_val, verbose=0)[1]

        return train_acc, val_acc

    import matplotlib.pyplot as plt

    fig, axs = plt.subplots(2, 1, constrained_layout=True)
    # fig.suptitle(f'{material}, {label}', fontsize=18)

    axs[0].plot(train_loss, label='Train loss')
    axs[0].plot(val_loss, label='Validation loss')
    axs[0].set_title('Train and Validation Losses')
    axs[0].set_xlabel('Epoch')
    axs[0].set_ylabel('Loss')
    axs[0].legend(loc='upper right')

    axs[1].plot(train_acc, label='Train accuracy')
    axs[1].plot(val_acc, label='Validation Accuracy')
    axs[1].set_title('Train and Validation Accuracies')
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Accuracy')
    axs[1].legend(loc='lower right')

    plt.show()

    # plt.plot(train_loss, label='Train loss')
    # plt.plot(val_loss, label='Validation loss')
    # plt.xlabel('Epoch')
    # plt.ylabel('Loss')
    # plt.legend(loc='upper right')
    # plt.show()


def random_search(num_iters):
    for _ in range(num_iters):
        lr = np.random.uniform(2e-4, 6e-4)

        train_acc, val_acc = new_design_va80(False, True, lr)

        print(f'LR: {lr:e} | Train Accuracy: {train_acc:.5f} | Val Accuracy: {val_acc:.5f}')
        print()


def check_mean_accuracy(num_models=4, augment_data=False):
    accuracies = []
    for _ in range(num_models):
        global X_train, y_train, X_test, y_test, X_val, y_val, input_shape
        X_train, y_train, X_test, y_test, X_val, y_val = load_images(shuffle_before=True)
        input_shape = X_train.shape[1:]
        accuracies.append(new_design_va80(augment_data=augment_data, return_results=True))

    print(np.mean(accuracies))


new_design_va80(augment_data=True)
...
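One caveat with this pattern: TF_CPP_MIN_LOG_LEVEL controls TensorFlow's native (C++) logging, and much of that output is emitted while tensorflow is first imported, so setting the variable after the import, as above, has little visible effect. Setting it before the import works reliably. A minimal sketch (the level values follow TensorFlow's convention: 0 shows everything, 1 filters INFO, 2 also filters WARNING, 3 also filters ERROR):

import os

disable_debug_info = True
if disable_debug_info:
    # Must be set before tensorflow is imported, otherwise the startup INFO
    # messages have already been printed.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf  # noqa: E402  (deliberately imported after the env var is set)

print(tf.__version__)  # C++-level INFO chatter is now suppressed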


final.py

Source: final.py (GitHub)


import tensorflow as tf
import numpy as np

from images import load_images
# Assumes at least one visible GPU; raises IndexError otherwise.
tf.config.experimental.set_memory_growth(tf.config.list_physical_devices('GPU')[0], True)
X_train, y_train, X_test, y_test, X_val, y_val = load_images(shuffle_before=True, num_augments=3)  # 4

input_shape = X_train.shape[1:]

disable_debug_info = False
if disable_debug_info:
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'


def create_and_train_TARnet(return_results=False, save_history=False, save_file='TARnet.hdf5'):

    initializer = tf.keras.initializers.VarianceScaling(2)

    layers = [
        tf.keras.layers.Conv2D(64, 3, 1, 'same', input_shape=input_shape, kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(64, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        # tf.keras.layers.MaxPooling2D(),
        # tf.keras.layers.BatchNormalization(),
        #
        # tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        # tf.keras.layers.BatchNormalization(),
        # tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        # tf.keras.layers.MaxPooling2D(),
        # tf.keras.layers.BatchNormalization(),
        #
        # tf.keras.layers.Conv2D(128, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),
        # tf.keras.layers.BatchNormalization(),
        # tf.keras.layers.Conv2D(64, 3, 1, 'same', kernel_initializer=initializer, activation='relu'),

        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),

        tf.keras.layers.Conv2D(6, 3, 1, 'same', kernel_initializer=initializer),
        tf.keras.layers.GlobalAveragePooling2D(),
        # tf.keras.layers.Flatten(),
        # tf.keras.layers.Dense(6),
        tf.keras.layers.Activation('softmax')
    ]

    model = tf.keras.models.Sequential(layers)

    train_loss = []
    val_loss = []
    train_acc = []
    val_acc = []

    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
    save_best_model = tf.keras.callbacks.ModelCheckpoint(save_file, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True)

    first_learning_rate = 2.26e-4
    second_learning_rate = 3.35e-5

    optimizer = tf.keras.optimizers.RMSprop()
    model.compile(optimizer, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

    # Record "epoch zero" metrics so the plots start from the untrained model.
    epoch_zero_train_loss, epoch_zero_train_acc = model.evaluate(X_train, y_train, verbose=0)
    epoch_zero_val_loss, epoch_zero_val_acc = model.evaluate(X_val, y_val, verbose=0)
    train_loss.append(epoch_zero_train_loss)
    val_loss.append(epoch_zero_val_loss)
    train_acc.append(epoch_zero_train_acc)
    val_acc.append(epoch_zero_val_acc)
    print('Before training')
    print(f'{X_train.shape[0]}/{X_train.shape[0]} - ?s - loss: {epoch_zero_train_loss:.4f} - accuracy: {epoch_zero_train_acc:.4f} - val_loss: {epoch_zero_val_loss:.4f} - val_accuracy: {epoch_zero_val_acc:.4f}')

    print(f'\nFirst Training Session | lr: {first_learning_rate:.2e}')
    optimizer.learning_rate = first_learning_rate
    history = model.fit(X_train, y_train, 26, epochs=16, validation_data=(X_val, y_val), verbose=2, callbacks=[early_stopping, save_best_model])
    train_loss.extend(history.history['loss'])
    val_loss.extend(history.history['val_loss'])
    train_acc.extend(history.history['accuracy'])
    val_acc.extend(history.history['val_accuracy'])

    print(f'\nSecond Training Session | lr: {second_learning_rate:.2e}')
    model.load_weights(save_file)
    optimizer.learning_rate = second_learning_rate
    history = model.fit(X_train, y_train, 26, epochs=14, validation_data=(X_val, y_val), verbose=2, callbacks=[early_stopping, save_best_model])
    train_loss.extend(history.history['loss'])
    val_loss.extend(history.history['val_loss'])
    train_acc.extend(history.history['accuracy'])
    val_acc.extend(history.history['val_accuracy'])

    model.load_weights(save_file)
    test_results = model.evaluate(X_test, y_test)

    if save_history:
        train_loss = np.asarray(train_loss)
        train_acc = np.asarray(train_acc)
        val_loss = np.asarray(val_loss)
        val_acc = np.asarray(val_acc)
        np.save('training_losses/TARnet.npy', train_loss)
        np.save('training_accuracies/TARnet.npy', train_acc)
        np.save('validation_losses/TARnet.npy', val_loss)
        np.save('validation_accuracies/TARnet.npy', val_acc)

    if return_results:
        return test_results[1]

    import matplotlib.pyplot as plt

    fig, axs = plt.subplots(2, 1, constrained_layout=True)

    axs[0].plot(train_loss, label='Train loss')
    axs[0].plot(val_loss, label='Validation loss')
    axs[0].set_title('Train and Validation Losses of TARnet', fontsize=18)
    axs[0].set_xlabel('Epoch')
    axs[0].set_ylabel('Loss')
    axs[0].legend(loc='upper right')
    axs[0].set_xlim(0, len(train_acc))

    axs[1].plot(train_acc, label='Train accuracy')
    axs[1].plot(val_acc, label='Validation Accuracy')
    axs[1].set_title('Train and Validation Accuracies of TARnet', fontsize=18)
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Accuracy')
    axs[1].hlines(0.85, 0, len(train_acc), label='85% Accuracy')
    axs[1].legend(loc='lower right')

    plt.xlim(0, len(train_acc))

    plt.show()

    ...
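The training routine above follows a common two-session schedule: fit with EarlyStopping and ModelCheckpoint(save_best_only=True), reload the best weights, then continue at a lower learning rate. A stripped-down sketch of that schedule under TF 2.x, assuming a compiled Keras model and data already exist (train_in_two_sessions is an illustrative helper, not from the snippet):

import tensorflow as tf


def train_in_two_sessions(model, X_train, y_train, X_val, y_val,
                          save_file='TARnet.hdf5', lrs=(2.26e-4, 3.35e-5)):
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3),
        tf.keras.callbacks.ModelCheckpoint(save_file, monitor='val_loss',
                                           save_best_only=True, save_weights_only=True),
    ]
    for lr in lrs:
        # Each session resumes from the best checkpoint found so far and
        # trains at a lower learning rate than the last.
        model.optimizer.learning_rate = lr
        model.fit(X_train, y_train, batch_size=26, epochs=16,
                  validation_data=(X_val, y_val), callbacks=callbacks)
        model.load_weights(save_file)
    return model

Reloading the best checkpoint between sessions means each lower-rate session starts from the best validation loss seen so far rather than from wherever early stopping halted.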


