How to use the should_apply method in localstack

Best Python code snippet using localstack_python

loss_scale_test.py

Source: loss_scale_test.py (GitHub)


# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScale classes.."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module

# TODO(reedwm): Create test case using multiple graphs

# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy


def create_mirrored_strategy():
  if context.num_gpus() >= 1:
    return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
  else:
    return mirrored_strategy.MirroredStrategy(['cpu:0'])


TESTCASES = ({
    'testcase_name': 'base',
    'strategy_fn': default_strategy_fn
}, {
    'testcase_name': 'distribute',
    'strategy_fn': create_mirrored_strategy
})


class FixedLossScaleTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def test_basic(self):
    loss_scale_value = 1000
    loss_scale = loss_scale_module.FixedLossScale(loss_scale_value)

    update_op, should_apply = loss_scale.update([constant_op.constant(0.)])
    self.evaluate(update_op)
    # should_apply should be a bool instead of a tensor, so that a tf.cond does
    # not have to be built in the graph by the caller.
    self.assertIsInstance(should_apply, bool)
    self.assertTrue(should_apply)
    self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))

    update_op, should_apply = loss_scale.update(
        [constant_op.constant(float('NaN'))])
    self.evaluate(update_op)
    self.assertIsInstance(should_apply, bool)
    self.assertTrue(should_apply)
    self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))

  @test_util.run_in_graph_and_eager_modes
  def test_serialization(self):
    loss_scale = loss_scale_module.get(123)
    config = loss_scale.get_config()
    loss_scale = loss_scale_module.FixedLossScale.from_config(config)
    self.assertEqual(self.evaluate(loss_scale()), 123.)

  @test_util.run_in_graph_and_eager_modes
  def test_call_type(self):
    scalar = loss_scale_module.FixedLossScale(123)
    self.assertIsInstance(scalar(), ops.Tensor)

  @test_util.run_in_graph_and_eager_modes
  def test_repr(self):
    loss_scale = loss_scale_module.FixedLossScale(123)
    self.assertEqual(repr(loss_scale), 'FixedLossScale(123.0)')


def _get_example_iter(inputs):
  dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
  return dataset_ops.make_one_shot_iterator(dataset)


class DynamicLossScaleTest(test.TestCase, parameterized.TestCase):

  def _get_tensor(self, is_finite):
    tensor = control_flow_ops.cond(is_finite, lambda: 1., lambda: float('NaN'))

    if not distribution_strategy_context.has_strategy():
      return tensor

    def get():
      rep_id = (
          distribution_strategy_context.get_replica_context()
          .replica_id_in_sync_group)
      return control_flow_ops.cond(
          math_ops.equal(rep_id, 0), lambda: tensor, lambda: 1.)

    distribution = distribution_strategy_context.get_strategy()
    return distribution.extended.call_for_each_replica(get)

  def _test_helper(self,
                   inputs,
                   expected_outputs,
                   initial_loss_scale=1.,
                   increment_period=2,
                   multiplier=2):
    loss_scale = loss_scale_module.DynamicLossScale(
        initial_loss_scale=initial_loss_scale,
        increment_period=increment_period,
        multiplier=multiplier)
    itr = _get_example_iter(inputs)

    def update():
      is_finite = itr.get_next()
      grad = self._get_tensor(is_finite)
      update_op, should_apply_gradients = loss_scale.update([grad])
      assert_op = check_ops.assert_equal(should_apply_gradients, is_finite)
      if context.executing_eagerly():
        return
      with ops.control_dependencies([assert_op]):
        return array_ops.identity(update_op)

    actual_outputs = []

    if not context.executing_eagerly():
      update_op = update()
      self.evaluate(variables.global_variables_initializer())
    for _ in range(len(inputs)):
      if context.executing_eagerly():
        update()
      else:
        self.evaluate(update_op)
      actual_outputs.append(self.evaluate(loss_scale()))
    self.assertEqual(actual_outputs, expected_outputs)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_increase(self, strategy_fn):
    with strategy_fn().scope():
      inputs = [True] * 6
      expected_outputs = [1, 2, 2, 4, 4, 8]
      self._test_helper(inputs, expected_outputs)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_keep_increasing_until_capped(self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = np.finfo(np.float32).max / 4
      max_float = np.finfo(np.float32).max
      inputs = [True] * 6
      # Output is capped the 2nd time it doubles.
      expected_outputs = [
          init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
          max_float, max_float
      ]
      self._test_helper(inputs, expected_outputs, init_loss_scale)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_decrease_every_step(self, strategy_fn):
    with strategy_fn().scope():
      inputs = [False] * 6
      init_loss_scale = 1024
      expected_outputs = [512, 256, 128, 64, 32, 16]
      self._test_helper(inputs, expected_outputs, init_loss_scale)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_keep_decreasing_until_one(self, strategy_fn):
    with strategy_fn().scope():
      inputs = [False] * 6
      init_loss_scale = 16
      expected_outputs = [8, 4, 2, 1, 1, 1]
      self._test_helper(inputs, expected_outputs, init_loss_scale)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_nan_clear_good_step(self, strategy_fn):
    with strategy_fn().scope():
      inputs = [True, True, True, False, True]
      expected_outputs = [1, 2, 2, 1, 1]
      self._test_helper(inputs, expected_outputs)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_trigger_loss_scale_update_each_step(self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = 1
      increment_period = 1
      inputs = [True] * 3 + [False, True, True]
      expected_outputs = [2, 4, 8, 4, 8, 16]
      self._test_helper(inputs, expected_outputs, init_loss_scale,
                        increment_period)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_alternating_good_and_bad_gradients_trigger_each_step(
      self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = 1
      increment_period = 1
      inputs = [True, False] * 4 + [True]
      expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
      self._test_helper(inputs, expected_outputs, init_loss_scale,
                        increment_period)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_alternating_good_and_bad_gradients_trigger_every_other_step(
      self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = 32
      increment_period = 2
      inputs = [True, False] * 3 + [True]
      expected_outputs = [32, 16, 16, 8, 8, 4, 4]
      self._test_helper(inputs, expected_outputs, init_loss_scale,
                        increment_period)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_nondefault_multiplier(self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = 4
      multiplier = 3
      inputs = [True, True, False, True, True]
      expected_outputs = [4, 12, 4, 4, 12]
      self._test_helper(
          inputs, expected_outputs, init_loss_scale, multiplier=multiplier)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_random_mix_good_and_bad_gradients(self, strategy_fn):
    with strategy_fn().scope():
      init_loss_scale = 4
      inputs = [
          False, True, True, True, False, True, False, True, True, True, False
      ]
      expected_outputs = [2, 2, 4, 4, 2, 2, 1, 1, 2, 2, 1]
      self._test_helper(inputs, expected_outputs, init_loss_scale)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_single_tensor_gradient(self, strategy_fn):
    with strategy_fn().scope():
      loss_scale = loss_scale_module.DynamicLossScale()
      grad = constant_op.constant(4.0)
      _, should_apply = loss_scale.update(grad)
      self.assertTrue(self.evaluate(should_apply))

  @test_util.run_in_graph_and_eager_modes
  def test_serialization(self):
    loss_scale = loss_scale_module.DynamicLossScale(
        initial_loss_scale=1, increment_period=2, multiplier=3)
    config = loss_scale.get_config()
    loss_scale = loss_scale_module.DynamicLossScale.from_config(config)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(self.evaluate(loss_scale()), 1)
    self.assertEqual(loss_scale.increment_period, 2)
    self.assertEqual(loss_scale.multiplier, 3)

  @test_util.run_in_graph_and_eager_modes
  def test_update_with_none_gradients(self):
    loss_scale = loss_scale_module.DynamicLossScale()
    loss_scale.update([None])

  @test_util.run_in_graph_and_eager_modes
  def test_get(self):
    scalar = loss_scale_module.get('dynamic')
    scalar2 = loss_scale_module.DynamicLossScale()
    self.assertEqual(scalar.initial_loss_scale, scalar2.initial_loss_scale)
    self.assertEqual(scalar.increment_period, scalar2.increment_period)
    self.assertEqual(scalar.multiplier, scalar2.multiplier)

  @test_util.run_in_graph_and_eager_modes
  def test_call_type(self):
    scalar = loss_scale_module.DynamicLossScale()
    self.assertIsInstance(scalar(), ops.Tensor)

  @parameterized.named_parameters(*TESTCASES)
  @test_util.run_in_graph_and_eager_modes
  def test_repr(self, strategy_fn):
    with strategy_fn().scope():
      loss_scale = loss_scale_module.DynamicLossScale(
          initial_loss_scale=1, increment_period=2, multiplier=3)
      if context.executing_eagerly():
        self.assertEqual(repr(loss_scale),
                         'DynamicLossScale(current_loss_scale=1.0, '
                         'num_good_steps=0, initial_loss_scale=1.0, '
                         'increment_period=2, multiplier=3.0)')
      else:
        self.assertEqual(repr(loss_scale),
                         'DynamicLossScale(initial_loss_scale=1.0, '
                         'increment_period=2, multiplier=3.0)')


if __name__ == '__main__':
...
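The tests above pin down the contract of should_apply: FixedLossScale.update() returns a plain Python True, while DynamicLossScale.update() returns a tensor that is False whenever the step's gradients contain Infs or NaNs. Below is a minimal sketch of how a training step could consume that pair, assuming the same TF1-era API exercised by the test; loss, var_list, and optimizer are hypothetical placeholders, and the production integration is tf.train.experimental.MixedPrecisionLossScaleOptimizer.

# Sketch only: gating gradient application on should_apply.
# `loss`, `var_list`, and `optimizer` are hypothetical placeholders.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, matching the test file's era

loss_scale = tf.train.experimental.DynamicLossScale()

def train_step(loss, var_list, optimizer):
  # Scale the loss so small float16 gradients do not underflow to zero.
  scaled_loss = loss * loss_scale()
  grads = tf.gradients(scaled_loss, var_list)
  unscaled_grads = [g / loss_scale() for g in grads]

  # update() inspects the gradients for Infs/NaNs, adjusts the scale, and
  # reports whether this step's gradients are safe to apply.
  update_op, should_apply = loss_scale.update(unscaled_grads)

  def apply_fn():
    apply_op = optimizer.apply_gradients(zip(unscaled_grads, var_list))
    with tf.control_dependencies([apply_op]):
      return tf.constant(True)

  # should_apply is a tensor for DynamicLossScale, so gate with tf.cond.
  maybe_apply = tf.cond(should_apply, apply_fn, lambda: tf.constant(False))
  return tf.group(update_op, maybe_apply)

For FixedLossScale, should_apply is the Python bool True (as test_basic asserts), so the caller can skip building the tf.cond entirely.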


test_recovery.py

Source: test_recovery.py (GitHub)


...
@pytest.fixture(autouse=True)
def mock_sleep(monkeypatch):
    monkeypatch.setattr('time.sleep', lambda seconds: None)


@pytest.fixture(params=[1, 2, (1, 2), (1, 3)])
def should_apply(request):
    expected_num_requests = 4  # 3 auth requests and 1 folder info request
    # Figure out how many times the call should fail. If should_apply is an
    # int, it should fail iff should_apply == 1. If it's a sequence, figure
    # out the first time it will succeed. It should fail until then.
    if isinstance(request.param, int):
        if request.param == 1:
            expected_num_requests += 1
    else:
        expected_num_requests += next(
            (a for a in enumerate(sorted(request.param)) if a[0] + 1 != a[1]),
            [0],
        )[0] or len(request.param)
    return request.param, expected_num_requests


@pytest.fixture(autouse=True)
def reset_chaos(mock_box, request):
...
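A test consuming this fixture unpacks the raw parameter and the expected request count it computed. A hypothetical sketch of such a consumer (the mock_box helpers named below, fail_on_attempts and request_count, are stand-ins for this suite's real chaos utilities):

def test_recovers_from_transient_failures(mock_box, should_apply):
    # The fixture yields (failing_attempts, expected_num_requests).
    failing_attempts, expected_num_requests = should_apply
    mock_box.fail_on_attempts(failing_attempts)  # hypothetical chaos hook
    mock_box.client.folder('0').get()            # 3 auth requests + 1 folder info
    assert mock_box.request_count == expected_num_requests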

