How to use the rate method in avocado

Best Python code snippets using avocado_python

learning_schedules.py

Source: learning_schedules.py (GitHub)

...24 """Helper function to return proper learning rate based on tf version."""25 if tf.executing_eagerly():26 return eager_decay_rate27 else:28 return eager_decay_rate()29def exponential_decay_with_burnin(global_step,30 learning_rate_base,31 learning_rate_decay_steps,32 learning_rate_decay_factor,33 burnin_learning_rate=0.0,34 burnin_steps=0,35 min_learning_rate=0.0,36 staircase=True):37 """Exponential decay schedule with burn-in period.38 In this schedule, learning rate is fixed at burnin_learning_rate39 for a fixed period, before transitioning to a regular exponential40 decay schedule.41 Args:42 global_step: int tensor representing global step.43 learning_rate_base: base learning rate.44 learning_rate_decay_steps: steps to take between decaying the learning rate.45 Note that this includes the number of burn-in steps.46 learning_rate_decay_factor: multiplicative factor by which to decay47 learning rate.48 burnin_learning_rate: initial learning rate during burn-in period. If49 0.0 (which is the default), then the burn-in learning rate is simply50 set to learning_rate_base.51 burnin_steps: number of steps to use burnin learning rate.52 min_learning_rate: the minimum learning rate.53 staircase: whether use staircase decay.54 Returns:55 If executing eagerly:56 returns a no-arg callable that outputs the (scalar)57 float tensor learning rate given the current value of global_step.58 If in a graph:59 immediately returns a (scalar) float tensor representing learning rate.60 """61 if burnin_learning_rate == 0:62 burnin_learning_rate = learning_rate_base63 def eager_decay_rate():64 """Callable to compute the learning rate."""65 post_burnin_learning_rate = tf.train.exponential_decay(66 learning_rate_base,67 global_step - burnin_steps,68 learning_rate_decay_steps,69 learning_rate_decay_factor,70 staircase=staircase)71 if callable(post_burnin_learning_rate):72 post_burnin_learning_rate = post_burnin_learning_rate()73 return tf.maximum(tf.where(74 tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),75 tf.constant(burnin_learning_rate),76 post_burnin_learning_rate), min_learning_rate, name='learning_rate')77 return _learning_rate_return_value(eager_decay_rate)78def exponential_decay_with_warmup(global_step,79 learning_rate_base,80 learning_rate_decay_steps,81 learning_rate_decay_factor,82 warmup_learning_rate=0.0,83 warmup_steps=0,84 min_learning_rate=0.0,85 staircase=True):86 """Exponential decay schedule with warm up period.87 Args:88 global_step: int tensor representing global step.89 learning_rate_base: base learning rate.90 learning_rate_decay_steps: steps to take between decaying the learning rate.91 Note that this includes the number of burn-in steps.92 learning_rate_decay_factor: multiplicative factor by which to decay learning93 rate.94 warmup_learning_rate: initial learning rate during warmup period.95 warmup_steps: number of steps to use warmup learning rate.96 min_learning_rate: the minimum learning rate.97 staircase: whether use staircase decay.98 Returns:99 If executing eagerly:100 returns a no-arg callable that outputs the (scalar)101 float tensor learning rate given the current value of global_step.102 If in a graph:103 immediately returns a (scalar) float tensor representing learning rate.104 """105 def eager_decay_rate():106 """Callable to compute the learning rate."""107 post_warmup_learning_rate = tf.train.exponential_decay(108 learning_rate_base,109 global_step - warmup_steps,110 learning_rate_decay_steps,111 learning_rate_decay_factor,112 staircase=staircase)113 if 
callable(post_warmup_learning_rate):114 post_warmup_learning_rate = post_warmup_learning_rate()115 if learning_rate_base < warmup_learning_rate:116 raise ValueError('learning_rate_base must be larger or equal to '117 'warmup_learning_rate.')118 slope = (learning_rate_base - warmup_learning_rate) / warmup_steps119 warmup_rate = slope * tf.cast(global_step,120 tf.float32) + warmup_learning_rate121 learning_rate = tf.where(122 tf.less(tf.cast(global_step, tf.int32), tf.constant(warmup_steps)),123 warmup_rate,124 tf.maximum(post_warmup_learning_rate, min_learning_rate),125 name='learning_rate')126 return learning_rate127 return _learning_rate_return_value(eager_decay_rate)128def cosine_decay_with_warmup(global_step,129 learning_rate_base,130 total_steps,131 warmup_learning_rate=0.0,132 warmup_steps=0,133 hold_base_rate_steps=0):134 """Cosine decay schedule with warm up period.135 Cosine annealing learning rate as described in:136 Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.137 ICLR 2017. https://arxiv.org/abs/1608.03983138 In this schedule, the learning rate grows linearly from warmup_learning_rate139 to learning_rate_base for warmup_steps, then transitions to a cosine decay140 schedule.141 Args:142 global_step: int64 (scalar) tensor representing global step.143 learning_rate_base: base learning rate.144 total_steps: total number of training steps.145 warmup_learning_rate: initial learning rate for warm up.146 warmup_steps: number of warmup steps.147 hold_base_rate_steps: Optional number of steps to hold base learning rate148 before decaying.149 Returns:150 If executing eagerly:151 returns a no-arg callable that outputs the (scalar)152 float tensor learning rate given the current value of global_step.153 If in a graph:154 immediately returns a (scalar) float tensor representing learning rate.155 Raises:156 ValueError: if warmup_learning_rate is larger than learning_rate_base,157 or if warmup_steps is larger than total_steps.158 """159 if total_steps < warmup_steps:160 raise ValueError('total_steps must be larger or equal to '161 'warmup_steps.')162 def eager_decay_rate():163 """Callable to compute the learning rate."""164 learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(165 np.pi *166 (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps167 ) / float(total_steps - warmup_steps - hold_base_rate_steps)))168 if hold_base_rate_steps > 0:169 learning_rate = tf.where(170 global_step > warmup_steps + hold_base_rate_steps,171 learning_rate, learning_rate_base)172 if warmup_steps > 0:173 if learning_rate_base < warmup_learning_rate:174 raise ValueError('learning_rate_base must be larger or equal to '175 'warmup_learning_rate.')176 slope = (learning_rate_base - warmup_learning_rate) / warmup_steps177 warmup_rate = slope * tf.cast(global_step,178 tf.float32) + warmup_learning_rate179 learning_rate = tf.where(global_step < warmup_steps, warmup_rate,180 learning_rate)181 return tf.where(global_step > total_steps, 0.0, learning_rate,182 name='learning_rate')183 return _learning_rate_return_value(eager_decay_rate)184def manual_stepping(global_step, boundaries, rates, warmup=False):185 """Manually stepped learning rate schedule.186 This function provides fine grained control over learning rates. One must187 specify a sequence of learning rates as well as a set of integer steps188 at which the current learning rate must transition to the next. 
For example,189 if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning190 rate returned by this function is .1 for global_step=0,...,4, .01 for191 global_step=5...9, and .001 for global_step=10 and onward.192 Args:193 global_step: int64 (scalar) tensor representing global step.194 boundaries: a list of global steps at which to switch learning195 rates. This list is assumed to consist of increasing positive integers.196 rates: a list of (float) learning rates corresponding to intervals between197 the boundaries. The length of this list must be exactly198 len(boundaries) + 1.199 warmup: Whether to linearly interpolate learning rate for steps in200 [0, boundaries[0]].201 Returns:202 If executing eagerly:203 returns a no-arg callable that outputs the (scalar)204 float tensor learning rate given the current value of global_step.205 If in a graph:206 immediately returns a (scalar) float tensor representing learning rate.207 Raises:208 ValueError: if one of the following checks fails:209 1. boundaries is a strictly increasing list of positive integers210 2. len(rates) == len(boundaries) + 1211 3. boundaries[0] != 0212 """213 if any([b < 0 for b in boundaries]) or any(214 [not isinstance(b, int) for b in boundaries]):215 raise ValueError('boundaries must be a list of positive integers')216 if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):217 raise ValueError('Entries in boundaries must be strictly increasing.')218 if any([not isinstance(r, float) for r in rates]):219 raise ValueError('Learning rates must be floats')220 if len(rates) != len(boundaries) + 1:221 raise ValueError('Number of provided learning rates must exceed '222 'number of boundary points by exactly 1.')223 if boundaries and boundaries[0] == 0:224 raise ValueError('First step cannot be zero.')225 if warmup and boundaries:226 slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]227 warmup_steps = list(range(boundaries[0]))228 warmup_rates = [rates[0] + slope * step for step in warmup_steps]229 boundaries = warmup_steps + boundaries230 rates = warmup_rates + rates[1:]231 else:232 boundaries = [0] + boundaries233 num_boundaries = len(boundaries)234 def eager_decay_rate():235 """Callable to compute the learning rate."""236 rate_index = tf.reduce_max(tf.where(237 tf.greater_equal(global_step, boundaries),238 list(range(num_boundaries)),239 [0] * num_boundaries))240 return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),241 name='learning_rate')...
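These schedule helpers come from the TensorFlow Object Detection API's learning-rate utilities. Below is a minimal usage sketch, assuming TF1-style graph mode (tf.compat.v1 with eager execution disabled) and that the snippet above is importable as object_detection.utils.learning_schedules; the import path and hyperparameter values are illustrative assumptions, so adjust them to your checkout.

# Sketch (assumed import path): wiring cosine_decay_with_warmup into a
# TF1 graph-mode training setup. In graph mode the function returns a
# learning-rate tensor directly; under eager execution it would return
# a no-arg callable instead.
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules  # assumed location

tf.disable_eager_execution()

global_step = tf.train.get_or_create_global_step()
learning_rate = learning_schedules.cosine_decay_with_warmup(
    global_step,
    learning_rate_base=0.04,
    total_steps=100000,
    warmup_learning_rate=0.004,
    warmup_steps=2000,
    hold_base_rate_steps=0)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)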

optimizer_builder.py

Source: optimizer_builder.py (GitHub)

...
  optimizer = None
  summary_vars = []
  if optimizer_type == 'rms_prop_optimizer':
    config = optimizer_config.rms_prop_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.train.RMSPropOptimizer(
        learning_rate,
        decay=config.decay,
        momentum=config.momentum_optimizer_value,
        epsilon=config.epsilon)
  if optimizer_type == 'momentum_optimizer':
    config = optimizer_config.momentum_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.train.MomentumOptimizer(
        learning_rate,
        momentum=config.momentum_optimizer_value)
  if optimizer_type == 'adam_optimizer':
    config = optimizer_config.adam_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon)
  if optimizer is None:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)
  if optimizer_config.use_moving_average:
    optimizer = tf_opt.MovingAverageOptimizer(
        optimizer, average_decay=optimizer_config.moving_average_decay)
  return optimizer, summary_vars


def build_optimizers_tf_v2(optimizer_config, global_step=None):
  """Create a TF v2 compatible optimizer based on config.

  Args:
    optimizer_config: A Optimizer proto message.
    global_step: A variable representing the current step.
      If None, defaults to tf.train.get_or_create_global_step()

  Returns:
    An optimizer and a list of variables for summary.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  optimizer_type = optimizer_config.WhichOneof('optimizer')
  optimizer = None
  summary_vars = []
  if optimizer_type == 'rms_prop_optimizer':
    config = optimizer_config.rms_prop_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate,
        decay=config.decay,
        momentum=config.momentum_optimizer_value,
        epsilon=config.epsilon)
  if optimizer_type == 'momentum_optimizer':
    config = optimizer_config.momentum_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.keras.optimizers.SGD(
        learning_rate,
        momentum=config.momentum_optimizer_value)
  if optimizer_type == 'adam_optimizer':
    config = optimizer_config.adam_optimizer
    learning_rate = _create_learning_rate(config.learning_rate,
                                          global_step=global_step)
    summary_vars.append(learning_rate)
    optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon)
  if optimizer is None:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)
  if optimizer_config.use_moving_average:
    optimizer = ema_optimizer.ExponentialMovingAverage(
        optimizer=optimizer,
        average_decay=optimizer_config.moving_average_decay)
  return optimizer, summary_vars


def build(config, global_step=None):
  if tf.executing_eagerly():
    return build_optimizers_tf_v2(config, global_step)
  else:
    return build_optimizers_tf_v1(config, global_step)


def _create_learning_rate(learning_rate_config, global_step=None):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_step: A variable representing the current step.
      If None, defaults to tf.train.get_or_create_global_step()

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  if global_step is None:
    global_step = tf.train.get_or_create_global_step()
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
...
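The builder is normally driven from a text-format Optimizer proto. Here is a minimal sketch of that flow; the import paths and the proto field names (adam_optimizer, constant_learning_rate, use_moving_average) are assumed to follow the Object Detection API layout and its optimizer.proto, so verify them against your checkout.

# Sketch: building an optimizer from a text-format Optimizer proto.
from google.protobuf import text_format
from object_detection.builders import optimizer_builder  # assumed location
from object_detection.protos import optimizer_pb2        # assumed location

optimizer_text_proto = """
  adam_optimizer: {
    learning_rate: {
      constant_learning_rate {
        learning_rate: 0.001
      }
    }
  }
  use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)

# build() dispatches to the TF1 or TF2 variant based on tf.executing_eagerly().
optimizer, summary_vars = optimizer_builder.build(optimizer_proto)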

optimizer_builder_tf1_test.py

Source: optimizer_builder_tf1_test.py (GitHub)

...
    }
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto)
    self.assertTrue(
        six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
    with self.test_session():
      learning_rate_out = learning_rate.eval()
      self.assertAlmostEqual(learning_rate_out, 0.004)

  def testBuildExponentialDecayLearningRate(self):
    learning_rate_text_proto = """
      exponential_decay_learning_rate {
        initial_learning_rate: 0.004
        decay_steps: 99999
        decay_factor: 0.85
        staircase: false
      }
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto)
    self.assertTrue(
        six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
    self.assertIsInstance(learning_rate, tf.Tensor)

  def testBuildManualStepLearningRate(self):
    learning_rate_text_proto = """
      manual_step_learning_rate {
        initial_learning_rate: 0.002
        schedule {
          step: 100
          learning_rate: 0.006
        }
        schedule {
          step: 90000
          learning_rate: 0.00006
        }
        warmup: true
      }
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto)
    self.assertIsInstance(learning_rate, tf.Tensor)

  def testBuildCosineDecayLearningRate(self):
    learning_rate_text_proto = """
      cosine_decay_learning_rate {
        learning_rate_base: 0.002
        total_steps: 20000
        warmup_learning_rate: 0.0001
        warmup_steps: 1000
        hold_base_rate_steps: 20000
      }
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto)
    self.assertIsInstance(learning_rate, tf.Tensor)

  def testRaiseErrorOnEmptyLearningRate(self):
    learning_rate_text_proto = """
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    with self.assertRaises(ValueError):
      optimizer_builder._create_learning_rate(learning_rate_proto)


@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class OptimizerBuilderTest(tf.test.TestCase):

  def testBuildRMSPropOptimizer(self):
    optimizer_text_proto = """
      rms_prop_optimizer: {
        learning_rate: {
          exponential_decay_learning_rate {
            initial_learning_rate: 0.004
            decay_steps: 800720
            decay_factor: 0.95
          }
        }
        momentum_optimizer_value: 0.9
        decay: 0.9
...
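The same pattern can be exercised outside the test harness. Below is a minimal sketch that evaluates a constant learning rate built from a LearningRate proto in a TF1 session, mirroring the truncated first test above (which asserts a value of 0.004); the import paths are assumed to follow the Object Detection API layout.

# Sketch (TF1 graph mode): evaluate a learning rate built by
# optimizer_builder._create_learning_rate from a text-format proto.
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder  # assumed location
from object_detection.protos import optimizer_pb2        # assumed location

tf.disable_eager_execution()

learning_rate_text_proto = """
  constant_learning_rate {
    learning_rate: 0.004
  }
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(learning_rate_proto)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # initializes the global step
  print(sess.run(learning_rate))  # expected to be close to 0.004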

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 automation testing minutes FREE!

