How to use the run_step method in ATX

Best Python code snippets using ATX

minimize_loss_test.py

Source: minimize_loss_test.py (GitHub)


...
            distribution.call_for_each_tower(
                model_fn, *inputs, run_concurrently=layer.built))

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=2).run_op

      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases = [], []
      for _ in range(5):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      self.evaluate(distribution.finalize())

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True])))
  def testTrainNetworkByCallForEachTower(self, distribution, optimizer_fn,
                                         use_callable_loss):
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        return distribution.group(
            distribution.call_for_each_tower(
                model_fn, iterator.get_next(), run_concurrently=layer.built))

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers() +
          combinations.distributions_and_v2_optimizers(),
          combinations.combine(mode=["graph", "eager"])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1 + combinations.optimizers_v2,
          mode=["graph"]))
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
    created_variables = []
    trainable_variables = []

    def appending_creator(next_creator, *args, **kwargs):
      v = next_creator(*args, **kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v

    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with variable_scope.variable_creator_scope(
        appending_creator), distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn,
          use_bias=True,
          use_callable_loss=True,
          create_optimizer_inside_model_fn=True)

      def step_fn(ctx, *inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.call_for_each_tower(
                model_fn, *inputs, run_concurrently=layer.built))

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op

      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      run_step()

      self.evaluate(distribution.finalize())

      def get_expected_variables(optimizer_fn, num_parameter_devices):
        variables_map = {
            "GradientDescent": ["dense/kernel", "dense/bias"],
            "Adam": [
                "dense/kernel", "dense/bias", "beta1_power", "beta2_power",
                "dense/kernel/Adam", "dense/kernel/Adam_1", "dense/bias/Adam",
                "dense/bias/Adam_1"
            ]
        }
        variables = variables_map[optimizer_fn().get_name()]
        variables.extend([
            v + "/replica_{}".format(replica)
            for v in variables
            for replica in range(1, num_parameter_devices)
        ])
        return set([v + ":0" for v in variables])

      self.assertEqual(
          get_expected_variables(optimizer_fn,
                                 len(distribution.parameter_devices)),
          set(created_variables))

  @combinations.generate(
      combinations.times(
          combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
          combinations.times(
              combinations.distributions_and_v1_optimizers(),
              combinations.combine(
                  mode=["graph", "eager"],
                  # TODO(isaprykin): Allow False here. Currently subsequent
                  # towers will re-execute UPDATE_OPS of previous towers.
                  update_ops_in_cross_tower_mode=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              optimizer_fn=combinations.optimizers_v1,
              mode=["graph"],
              update_ops_in_cross_tower_mode=[False])))
  def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
                                    renorm, update_ops_in_cross_tower_mode):
    """Verifies that moving mean updates are reduced across towers."""
    with distribution.scope():
      num_towers = len(distribution.worker_devices)
      model_fn, dataset_fn, batchnorm = batchnorm_example(
          optimizer_fn,
          batch_per_epoch=num_towers,
          momentum=momentum,
          renorm=renorm,
          update_ops_in_tower_mode=not update_ops_in_cross_tower_mode)

      # Make sure prefetching is disabled since that makes the
      # specific input on each device to be non deterministic, and
      # this test relies on specific input being on each device.
      if isinstance(distribution, mirrored_strategy.MirroredStrategy):
        self.assertFalse(distribution._prefetch_on_device)

      def step_fn(ctx, *inputs):
        del ctx  # Unused
        fetches = distribution.unwrap(
            distribution.call_for_each_tower(
                model_fn, *inputs, run_concurrently=batchnorm.built))
        if update_ops_in_cross_tower_mode:
          fetches += ops.get_collection(ops.GraphKeys.UPDATE_OPS)
        return control_flow_ops.group(fetches)

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op

      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      expected_moving_means = [0.] * 8

      def averaged_batch_mean(i):
        # Each batch has shape [16, 8] where the ith element in jth list is
        # (8 * j + i + tower_id * 100). So the batch mean in each tower is
        # (60 + i + tower_id * 100). So here comes its batch mean over all
        # towers:
        return 60. + i + (num_towers - 1.) / 2. * 100.

      for _ in range(10):
        run_step()
        moving_means = self.evaluate(batchnorm.moving_mean)

        # We make sure that the moving_mean is updated as if the sample mean is
        # calculated over all towers.
        for i, expected_moving_mean in enumerate(expected_moving_means):
          expected_moving_means[i] -= ((
              expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
          self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)

      self.evaluate(distribution.finalize())

  @combinations.generate(
      combinations.times(
          combinations.combine(
              optimizer_fn=[
                  combinations.gradient_descent_optimizer_v1_fn,
                  combinations.gradient_descent_optimizer_v2_fn
              ],
              loss_reduction=[
                  losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
                  losses_impl.Reduction.SUM_OVER_BATCH_SIZE,
                  losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
              ]),
          combinations.times(
              combinations.combine(
                  distribution=[
                      combinations.one_device_strategy,
                      combinations.mirrored_strategy_with_gpu_and_cpu,
                      combinations.mirrored_strategy_with_two_gpus
                  ]),
              combinations.combine(
                  mode=["graph"], use_callable_loss=[True, False]) +
              combinations.combine(mode=["eager"], use_callable_loss=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              mode=["graph"],
              use_callable_loss=[True, False])))
  def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                    use_callable_loss):
    with distribution.scope():
      all_vars = []

      def model_fn(x, y):

        def loss_fn():
          # Use fixed initialization to make the steps deterministic.
          w = variable_scope.get_variable("w", initializer=[[2.]])
          all_vars.append(w)
          predict = math_ops.matmul(x, w)
          return losses_impl.mean_squared_error(
              y, predict, reduction=loss_reduction)

        optimizer = optimizer_fn()  # GradientDescent with 0.2 learning rate
        if use_callable_loss:
          return optimizer.minimize(loss_fn)
        else:
          return optimizer.minimize(loss_fn())

      def dataset_fn():
        features = dataset_ops.Dataset.from_tensors([[2.], [7.]])
        labels = dataset_ops.Dataset.from_tensors([[6.], [21.]])
        return dataset_ops.Dataset.zip((features, labels)).repeat()

      def step_fn(ctx, x, y):
        del ctx  # Unused
        return distribution.group(
            distribution.call_for_each_tower(
                model_fn, x, y, run_concurrently=False))

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op

      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      run_step()

      v = all_vars[0]
      self.assertTrue(all([v is vi for vi in all_vars[1:]]))
      weight = numpy.squeeze(self.evaluate(v))
      # Our model is:
      #   predict = x * w
      #   loss = (predict - y)^2
      #   dloss/dpredict = 2*(predict - y)
      #   dloss/dw = 2 * x^T @ (predict - y)
      # For our batch size of 2, assuming sum loss reduction:
      #   x = [2, 7]
      #   y = [6, 21]
      #   w_initial = 2
      #   predict = [4, 14]
      #   predict - y = [-2, -7]
      #   dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
      # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2
      # with sum loss reduction, or 10.6 with mean.
      if loss_reduction == losses_impl.Reduction.SUM:
        # Note that the "distribution.num_towers" factor will go away once
        # we split the input across towers, instead of pulling a complete
        # batch of input per tower.
        self.assertNear(weight, 2 + 21.2 * distribution.num_towers, 0.0001)
      else:
        # One of the mean loss reductions.
        self.assertNear(weight, 2 + 10.6, 0.0001)

      self.evaluate(distribution.finalize())

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph", "eager"]),
          combinations.combine(is_tpu=[False])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1,
          mode=["graph"],
          is_tpu=[True]))
  def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
    with distribution.scope():
      def dataset_fn():
        dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
        # TODO(priyag): batch with drop_remainder=True causes shapes to be
        # fully defined for TPU. Remove this when XLA supports dynamic shapes.
        return dataset.batch(batch_size=1, drop_remainder=True)

      optimizer = optimizer_fn()
      layer = core.Dense(1, use_bias=True)

      key1 = "foo"
      value1 = "bar"

      def model_fn(output_context, x):
        """A very simple model written by the user."""
        def loss_fn():
          y = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
          return y * y

        train_op = optimizer.minimize(loss_fn)
        loss = loss_fn()
        output_context.set_last_step_output(
            name="tower_loss_agg",
            output=loss,
            aggregation=variables_lib.VariableAggregation.MEAN)
        output_context.set_non_tensor_output(key1, value1)
        return (train_op, loss)

      def step_fn(output_context, *inputs):
        (train_op, loss) = distribution.call_for_each_tower(
            model_fn, output_context, *inputs, run_concurrently=False)
        output_context.set_last_step_output(
            name="cross_tower_loss_agg",
            output=loss,
            aggregation=variables_lib.VariableAggregation.MEAN)
        output_context.set_last_step_output(
            name="cross_tower_loss_noagg",
            output=loss)
        return distribution.group(train_op)

      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()

      def run_step():
        initial_loss = lambda: constant_op.constant(1e7)
        # Initial values corresponding to aggregated losses are just single
        # tensors. But for non aggregated losses, we need to have initial
        # values that are of the same structure as non reduced losses. In
        # MirroredStrategy, this will be a list of losses, in TPUStrategy
        # it will be single tensor. Using `broadcast` followed by `unwrap`
        # gives us the desired initial value structure.
        initial_loop_values = {
            "tower_loss_agg": initial_loss(),
            "cross_tower_loss_agg": initial_loss(),
            "cross_tower_loss_noagg":
                distribution.unwrap(distribution.broadcast(initial_loss()))
        }
        ctx = distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=2,
            initial_loop_values=initial_loop_values)

        self.assertEqual({key1: [value1]}, ctx.non_tensor_outputs)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["tower_loss_agg"],
            aggregated=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_tower_loss_agg"],
            aggregated=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_tower_loss_noagg"],
            aggregated=False, distribution=distribution)
        return (ctx.run_op, ctx.last_step_outputs["tower_loss_agg"])

      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases, losses = [], [], []
      for _ in range(5):
        _, loss = run_step()
        losses.append(loss)
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      self.evaluate(distribution.finalize())

      loss_is_not_increasing = all(y <= x for x, y in zip(losses, losses[1:]))
      self.assertTrue(loss_is_not_increasing)

      error = abs(
          numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      error_is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(error_is_not_increasing)

  def _verify_loss_output(self, initial_loss, loss_output, aggregated,
                          distribution):
    if not aggregated:
      self.assertEqual(distribution.num_towers,
...
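Distilled, every test above builds run_step the same way: create a distributed iterator, wrap a per-step function into a single op with run_steps_on_dataset, convert that op into a session callable when running in graph mode, and then drive it in a loop. Below is a minimal sketch of that pattern under the same TF 1.x contrib DistributionStrategy test harness used in the file; step_fn, dataset_fn, and num_steps are placeholders rather than names taken from the source.

# Minimal sketch of the run_step pattern (TF 1.x contrib DistributionStrategy).
# `distribution`, `step_fn`, `dataset_fn`, and `num_steps` are placeholders.
iterator = distribution.distribute_dataset(
    dataset_fn).make_one_shot_iterator()

def run_step():
  # Build the op that executes `step_fn` for the requested iterations.
  return distribution.run_steps_on_dataset(
      step_fn, iterator, iterations=1).run_op

self.evaluate(distribution.initialize())
if not context.executing_eagerly():
  # In graph mode, build the step op once and reuse it as a cheap callable.
  with self.cached_session() as sess:
    run_step = sess.make_callable(run_step())
self.evaluate(variables_lib.global_variables_initializer())

for _ in range(num_steps):
  run_step()

self.evaluate(distribution.finalize())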


release.py

Source: release.py (GitHub)


...
    choice = raw_input("--- Confirm step? (y/N) [y] ")
    if choice.lower() == 'n':
        return True
    return False


def run_step(*args):
    """
    Prints out the command and asks if it should be run.
    If yes (default), runs it.

    :param args: list of strings (command and args)
    """
    global DRY_RUN

    cmd = args
    print(' '.join(cmd))
    if skip_step():
        print('--- Skipping...')
    elif DRY_RUN:
        print('--- Pretending to run...')
    else:
        subprocess.check_output(cmd)


def version(version_file):
    _version_re = re.compile(r'__version__\s+=\s+(.*)')
    with open(version_file, 'rb') as f:
        ver = str(ast.literal_eval(_version_re.search(
            f.read().decode('utf-8')).group(1)))
    return ver


def commit_for_release(version_file, ver):
    run_step('git', 'reset')
    run_step('git', 'add', version_file)
    run_step('git', 'commit', '--message', 'Releasing version %s' % ver)


def create_git_tag(tag_name):
    run_step('git', 'tag', tag_name)


def register_with_pypi():
    run_step('python', 'setup.py', 'register')


def create_source_tarball():
    run_step('python', 'setup.py', 'sdist')


def upload_source_tarball():
    run_step('python', 'setup.py', 'sdist', 'upload')


def push_to_github():
    run_step('git', 'push', 'origin', 'master')


def push_tags_to_github():
    run_step('git', 'push', '--tags', 'origin')


if __name__ == '__main__':
    if DEBUG:
        subprocess.check_output = lambda x: x

    ver = version('wharfee/__init__.py')
    print('Releasing Version:', ver)

    parser = OptionParser()
    parser.add_option(
        "-c", "--confirm-steps", action="store_true", dest="confirm_steps",
        default=False, help=("Confirm every step. If the step is not "
                             "confirmed, it will be skipped.")
    )
    parser.add_option(
        "-d", "--dry-run", action="store_true", dest="dry_run",
        default=False, help="Print out, but not actually run any steps."
...
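The point of run_step in this script is that every release action funnels through one choke point that can echo, confirm, skip, or dry-run the command. Adding a new action is just another call with the command split into argv-style strings. The sketch below shows a hypothetical extra step; the upload_wheel name and its command are assumptions, not part of the file.

# Hypothetical extra release step, following the same pattern as the helpers
# above: pass each command-line token as a separate argument so run_step can
# print, confirm, skip, or dry-run it before anything is executed.
def upload_wheel():
    run_step('python', 'setup.py', 'bdist_wheel', 'upload')

# With --dry-run the command is only printed; with --confirm-steps,
# skip_step() prompts before each command is run.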


test_base.py

Source: test_base.py (GitHub)


...
def test_scTenifoldKnk_method1():
    df = get_test_df(n_cells=100, n_genes=100, random_state=42)
    sc = scTenifoldKnk(data=df,
                       qc_kws={"min_lib_size": 1})
    sc.run_step("qc")
    sc.run_step("nc", n_cpus=1)
    sc.run_step("td")
    sc.run_step("ko", ko_genes=[sc.tensor_dict["WT"].index.to_list()[0]])
    sc.run_step("ma")
    sc.run_step("dr")
    assert isinstance(sc.d_regulation, pd.DataFrame)
    sc.save(file_dir="./saved_knk")
    sc2 = scTenifoldKnk.load(file_dir="./saved_knk")
    np.array_equal(sc.tensor_dict["WT"], sc2.tensor_dict["WT"])


def test_scTenifoldKnk_method2():
    df = get_test_df(n_genes=100, n_cells=100)
    sc = scTenifoldKnk(data=df,
                       ko_method="propagation",  # the gene you wants to knock out
                       qc_kws={"min_lib_size": 10, "min_percent": 0.001},
                       ko_kws={"degree": 10})
    sc.run_step("qc")
    sc.run_step("nc", n_cpus=-1)
    sc.run_step("td")
    sc.run_step("ko", ko_genes=[sc.tensor_dict["WT"].index.to_list()[0]])
    sc.run_step("ma")
    sc.run_step("dr")
...
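Here run_step takes the name of a pipeline stage plus keyword arguments for that stage, and the stages run in a fixed order before the results (d_regulation) become available. A minimal sketch of the same call sequence outside the test harness follows; the import path, the get_test_df helper, and the per-stage comments are assumptions based on the snippet and the scTenifoldKnk pipeline, not facts taken from the file.

import pandas as pd
# Import path is an assumption; the test imports scTenifoldKnk and
# get_test_df directly from the package under test.
from scTenifold import scTenifoldKnk

df = get_test_df(n_cells=100, n_genes=100, random_state=42)
sc = scTenifoldKnk(data=df, qc_kws={"min_lib_size": 1})

sc.run_step("qc")             # quality control / filtering
sc.run_step("nc", n_cpus=1)   # network construction
sc.run_step("td")             # tensor decomposition
# Knock out the first gene of the wild-type network built above.
sc.run_step("ko", ko_genes=[sc.tensor_dict["WT"].index.to_list()[0]])
sc.run_step("ma")             # manifold alignment
sc.run_step("dr")             # differential regulation

assert isinstance(sc.d_regulation, pd.DataFrame)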


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run ATX automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
