How to use running_config method in autotest

Best Python code snippet using autotest_python

main.py

Source: main.py (GitHub)


#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
warnings.simplefilter("ignore")
import tensorflow as tf
import horovod.tensorflow as hvd
from utils import hvd_utils
from runtime import Runner
from utils.cmdline_helper import parse_cmdline

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.ERROR)
    FLAGS = parse_cmdline()
    RUNNING_CONFIG = tf.contrib.training.HParams(
        mode=FLAGS.mode,
        # ======= Directory HParams ======= #
        log_dir=FLAGS.results_dir,
        model_dir=FLAGS.results_dir,
        summaries_dir=FLAGS.results_dir,
        data_dir=FLAGS.data_dir,
        data_idx_dir=FLAGS.data_idx_dir,
        # ========= Model HParams ========= #
        n_classes=1001,
        input_format='NHWC',
        compute_format=FLAGS.data_format,
        dtype=tf.float32 if FLAGS.precision == "fp32" else tf.float16,
        height=224,
        width=224,
        n_channels=3,
        # ======= Training HParams ======== #
        iter_unit=FLAGS.iter_unit,
        num_iter=FLAGS.num_iter,
        warmup_steps=FLAGS.warmup_steps,
        batch_size=FLAGS.batch_size,
        log_every_n_steps=FLAGS.display_every,
        lr_init=FLAGS.lr_init,
        lr_warmup_epochs=FLAGS.lr_warmup_epochs,
        weight_decay=FLAGS.weight_decay,
        momentum=FLAGS.momentum,
        loss_scale=FLAGS.loss_scale,
        label_smoothing=FLAGS.label_smoothing,
        use_cosine_lr=FLAGS.use_cosine_lr,
        use_static_loss_scaling=FLAGS.use_static_loss_scaling,
        distort_colors=False,
        # ======= Optimization HParams ======== #
        use_xla=FLAGS.use_xla,
        use_tf_amp=FLAGS.use_tf_amp,
        use_dali=FLAGS.use_dali,
        gpu_memory_fraction=FLAGS.gpu_memory_fraction,
        seed=FLAGS.seed,
    )
    # ===================================
    runner = Runner(
        # ========= Model HParams ========= #
        n_classes=RUNNING_CONFIG.n_classes,
        input_format=RUNNING_CONFIG.input_format,
        compute_format=RUNNING_CONFIG.compute_format,
        dtype=RUNNING_CONFIG.dtype,
        n_channels=RUNNING_CONFIG.n_channels,
        height=RUNNING_CONFIG.height,
        width=RUNNING_CONFIG.width,
        distort_colors=RUNNING_CONFIG.distort_colors,
        log_dir=RUNNING_CONFIG.log_dir,
        model_dir=RUNNING_CONFIG.model_dir,
        data_dir=RUNNING_CONFIG.data_dir,
        data_idx_dir=RUNNING_CONFIG.data_idx_dir,
        # ======= Optimization HParams ======== #
        use_xla=RUNNING_CONFIG.use_xla,
        use_tf_amp=RUNNING_CONFIG.use_tf_amp,
        use_dali=RUNNING_CONFIG.use_dali,
        gpu_memory_fraction=RUNNING_CONFIG.gpu_memory_fraction,
        seed=RUNNING_CONFIG.seed
    )
    if RUNNING_CONFIG.mode in ["train", "train_and_evaluate", "training_benchmark"]:
        runner.train(
            iter_unit=RUNNING_CONFIG.iter_unit,
            num_iter=RUNNING_CONFIG.num_iter,
            batch_size=RUNNING_CONFIG.batch_size,
            warmup_steps=RUNNING_CONFIG.warmup_steps,
            log_every_n_steps=RUNNING_CONFIG.log_every_n_steps,
            weight_decay=RUNNING_CONFIG.weight_decay,
            lr_init=RUNNING_CONFIG.lr_init,
            lr_warmup_epochs=RUNNING_CONFIG.lr_warmup_epochs,
            momentum=RUNNING_CONFIG.momentum,
            loss_scale=RUNNING_CONFIG.loss_scale,
            label_smoothing=RUNNING_CONFIG.label_smoothing,
            use_static_loss_scaling=RUNNING_CONFIG.use_static_loss_scaling,
            use_cosine_lr=RUNNING_CONFIG.use_cosine_lr,
            is_benchmark=RUNNING_CONFIG.mode == 'training_benchmark',
        )
    if RUNNING_CONFIG.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:
        if RUNNING_CONFIG.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.evaluate(
                iter_unit=RUNNING_CONFIG.iter_unit if RUNNING_CONFIG.mode != "train_and_evaluate" else "epoch",
                num_iter=RUNNING_CONFIG.num_iter if RUNNING_CONFIG.mode != "train_and_evaluate" else 1,
                warmup_steps=RUNNING_CONFIG.warmup_steps,
                batch_size=RUNNING_CONFIG.batch_size,
                log_every_n_steps=RUNNING_CONFIG.log_every_n_steps,
                is_benchmark=RUNNING_CONFIG.mode == 'inference_benchmark'...
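The script above gathers every run-time setting into a single tf.contrib.training.HParams object named RUNNING_CONFIG, then reads those settings back as attributes when it builds the Runner and dispatches to train() or evaluate(). Below is a minimal sketch of that same pattern; it assumes TensorFlow 1.x (where tf.contrib.training.HParams is available), and the train_stub function and the particular hyperparameter values are illustrative only, not part of the original script.

import tensorflow as tf

def train_stub(batch_size, lr_init, num_iter):
    # Stand-in for Runner.train(); it just echoes the settings it receives.
    print("training with batch_size=%d, lr_init=%.4f, num_iter=%d"
          % (batch_size, lr_init, num_iter))

if __name__ == "__main__":
    # Gather every run-time setting in one HParams object ...
    RUNNING_CONFIG = tf.contrib.training.HParams(
        mode="train",
        batch_size=64,
        lr_init=0.1,
        num_iter=90,
    )

    # ... then read the values back as plain attributes wherever they are needed.
    if RUNNING_CONFIG.mode == "train":
        train_stub(
            batch_size=RUNNING_CONFIG.batch_size,
            lr_init=RUNNING_CONFIG.lr_init,
            num_iter=RUNNING_CONFIG.num_iter,
        )

Because every stage of the run reads from the same config object, a typo in a setting name fails fast with an attribute error instead of silently falling back to a default.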


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

