How to use the _upgrade method in TensorFlow's tf_upgrade_v2 test suite

Example Python code snippet from the TensorFlow project (tensorflow/tools/compatibility)

tf_upgrade_v2_test.py

Source:tf_upgrade_v2_test.py Github

copy

Full Screen

...87 for name in api_names_v1:88 cls.v1_symbols["tf." + name] = attr89 visitor = public_api.PublicAPIVisitor(symbol_collector_v1)90 traverse.traverse(tf.compat.v1, visitor)91 def _upgrade(self, old_file_text):92 in_file = six.StringIO(old_file_text)93 out_file = six.StringIO()94 upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())95 count, report, errors = (96 upgrader.process_opened_file("test.py", in_file,97 "test_out.py", out_file))98 return count, report, errors, out_file.getvalue()99 def testParseError(self):100 _, report, unused_errors, unused_new_text = self._upgrade(101 "import tensorflow as tf\na + \n")102 self.assertTrue(report.find("Failed to parse") != -1)103 def testReport(self):104 text = "tf.angle(a)\n"105 _, report, unused_errors, unused_new_text = self._upgrade(text)106 # This is not a complete test, but it is a sanity test that a report107 # is generating information.108 self.assertTrue(report.find("Renamed function `tf.angle` to "109 "`tf.math.angle`"))110 def testRename(self):111 text = "tf.conj(a)\n"112 _, unused_report, unused_errors, new_text = self._upgrade(text)113 self.assertEqual(new_text, "tf.math.conj(a)\n")114 text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"115 _, unused_report, unused_errors, new_text = self._upgrade(text)116 self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")117 def testAllAPI(self):118 if not hasattr(tf.compat, "v2"):119 return120 # Converts all symbols in the v1 namespace to the v2 namespace, raising121 # an error if the target of the conversion is not in the v2 namespace.122 # Please regenerate the renames file or edit any manual renames if this123 # test fails.124 def conversion_visitor(unused_path, unused_parent, children):125 for child in children:126 _, attr = tf_decorator.unwrap(child[1])127 api_names = tf_export.get_v1_names(attr)128 for name in api_names:129 _, _, _, text = self._upgrade("tf." 
+ name)130 if (text and131 not text.startswith("tf.compat.v1") and132 not text.startswith("tf.compat.v2") and133 text not in self.v2_symbols and134 # Builds currently install old version of estimator that doesn't135 # have some 2.0 symbols.136 not text.startswith("tf.estimator")):137 self.assertFalse(138 True, "Symbol %s generated from %s not in v2 API" % (139 text, name))140 visitor = public_api.PublicAPIVisitor(conversion_visitor)141 visitor.do_not_descend_map["tf"].append("contrib")142 visitor.private_map["tf.compat"] = ["v1", "v2"]143 traverse.traverse(tf.compat.v1, visitor)144 def testAllAPIV1(self):145 collect = True146 v1_symbols = set([])147 # Converts all symbols in the v1 namespace to the v2 namespace, raising148 # an error if the target of the conversion is not in the v1 namespace.149 def conversion_visitor(unused_path, unused_parent, children):150 for child in children:151 _, attr = tf_decorator.unwrap(child[1])152 api_names = tf_export.get_v1_names(attr)153 for name in api_names:154 if collect:155 v1_symbols.add("tf." + name)156 else:157 _, _, _, text = self._upgrade("tf." 
+ name)158 if (text and159 not text.startswith("tf.compat.v1") and160 not text.startswith("tf.compat.v2") and161 not text.startswith("tf.estimator") and162 text not in v1_symbols):163 self.assertFalse(164 True, "Symbol %s generated from %s not in v1 API" % (165 text, name))166 visitor = public_api.PublicAPIVisitor(conversion_visitor)167 visitor.do_not_descend_map["tf"].append("contrib")168 visitor.private_map["tf.compat"] = ["v1", "v2"]169 traverse.traverse(tf.compat.v1, visitor)170 collect = False171 traverse.traverse(tf.compat.v1, visitor)172 def testV1KeywordArgNames(self):173 all_keyword_renames = (174 tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)175 # Visitor that verifies V1 argument names.176 def arg_test_visitor(unused_path, unused_parent, children):177 for child in children:178 _, attr = tf_decorator.unwrap(child[1])179 names_v1 = tf_export.get_v1_names(attr)180 for name in names_v1:181 name = "tf.%s" % name182 if name not in all_keyword_renames:183 continue184 arg_names_v1 = tf_inspect.getargspec(attr)[0]185 keyword_renames = all_keyword_renames[name]186 self.assertEqual(type(keyword_renames), dict)187 # Assert that v1 function has valid v1 argument names.188 for from_name, _ in keyword_renames.items():189 self.assertIn(190 from_name, arg_names_v1,191 "%s not found in %s arguments: %s" %192 (from_name, name, str(arg_names_v1)))193 visitor = public_api.PublicAPIVisitor(arg_test_visitor)194 visitor.do_not_descend_map["tf"].append("contrib")195 visitor.private_map["tf.compat"] = ["v1", "v2"]196 traverse.traverse(tf.compat.v1, visitor)197 def testV2KeywordArgNames(self):198 # This test converts a call of the form:199 # tf.foo(arg1=0, arg2=1, ...)200 # to 2.0. 
Then, checks that converted function has valid argument names.201 if not hasattr(tf.compat, "v2"):202 return203 v2_arg_exceptions = {204 "verify_shape_is_now_always_true",205 # These arguments should not be used, they just specify206 # that a function takes named arguments.207 "keyword_required",208 "_sentinel",209 }210 v1_name_exceptions = {211 "tf.print", # requires print_function import212 }213 function_warnings = (214 tf_upgrade_v2.TFAPIChangeSpec().function_warnings)215 function_transformers = (216 tf_upgrade_v2.TFAPIChangeSpec().function_transformers)217 keyword_renames = (218 tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)219 # Visitor that converts to V2 and checks V2 argument names.220 def conversion_visitor(unused_path, unused_parent, children):221 for child in children:222 _, attr = tf_decorator.unwrap(child[1])223 if not tf_inspect.isfunction(attr):224 continue225 names_v1 = tf_export.get_v1_names(attr)226 arg_names_v1 = get_args(attr)227 for name in names_v1:228 tf_name = "tf.%s" % name229 if tf_name in function_warnings or tf_name in function_transformers:230 continue # These require manual change231 if tf_name in v1_name_exceptions:232 continue233 # Assert that arg names after converting to v2 are present in234 # v2 function.235 # 1. First, create an input of the form:236 # tf.foo(arg1=val1, arg2=val2, ...)237 args = ",".join(238 ["%s=%d" % (from_name, from_index)239 for from_index, from_name in enumerate(arg_names_v1)])240 text_input = "%s(%s)" % (tf_name, args)241 # 2. 
Convert the input to V2.242 _, _, _, text = self._upgrade(text_input)243 new_function_name, new_args = get_func_and_args_from_str(text)244 if new_function_name == "tf.compat.v1.%s" % name:245 if tf_name in keyword_renames:246 # If we rename arguments, new function must be available in 2.0.247 # We should not be using compat.v1 in this case.248 self.assertFalse(249 "Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %250 (new_function_name, text_input, text))251 continue252 if new_function_name.startswith("tf.compat.v2"):253 self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),254 self.v2_symbols)255 continue256 # 3. Verify V2 function and arguments.257 args_v2 = get_args(self.v2_symbols[new_function_name])258 args_v2.extend(v2_arg_exceptions)259 for new_arg in new_args:260 self.assertIn(261 new_arg, args_v2,262 "Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"263 "Supported arguments: %s" % (264 new_arg, text_input, text, str(args_v2)))265 # 4. Verify that the argument exists in v1 as well.266 if new_function_name in set(["tf.nn.ctc_loss",267 "tf.saved_model.save"]):268 continue269 args_v1 = get_args(self.v1_symbols[new_function_name])270 args_v1.extend(v2_arg_exceptions)271 for new_arg in new_args:272 self.assertIn(273 new_arg, args_v1,274 "Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"275 "Supported arguments: %s" % (276 new_arg, text_input, text, str(args_v1)))277 visitor = public_api.PublicAPIVisitor(conversion_visitor)278 visitor.do_not_descend_map["tf"].append("contrib")279 visitor.private_map["tf.compat"] = ["v1", "v2"]280 traverse.traverse(tf.compat.v1, visitor)281 def testPositionsMatchArgGiven(self):282 full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings283 method_names = full_dict.keys()284 for method_name in method_names:285 args = full_dict[method_name].keys()286 # special case for optimizer methods287 if method_name.startswith("*."):288 method = method_name.replace("*", "tf.train.Optimizer")289 
else:290 method = method_name291 method = get_symbol_for_name(tf, method)292 arg_spec = tf_inspect.getfullargspec(method)293 for (arg, pos) in args:294 # to deal with the self argument on methods on objects295 if method_name.startswith("*."):296 pos += 1297 self.assertEqual(arg_spec[0][pos], arg)298 def testReorderFileNeedsUpdate(self):299 reordered_function_names = (300 tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)301 function_reorders = (302 tf_upgrade_v2.TFAPIChangeSpec().function_reorders)303 manual_function_reorders = (304 tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)305 added_names_message = """Some function names in306self.reordered_function_names are not in reorders_v2.py.307Please run the following commands to update reorders_v2.py:308bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map309bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map310"""311 removed_names_message = """%s in self.reorders_v2 does not match312any name in self.reordered_function_names.313Please run the following commands to update reorders_v2.py:314bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map315bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map316"""317 self.assertTrue(318 reordered_function_names.issubset(function_reorders),319 added_names_message)320 # function_reorders should contain reordered_function_names321 # and their TensorFlow V1 aliases.322 for name in function_reorders:323 if name in manual_function_reorders:324 continue325 # get other names for this function326 attr = get_symbol_for_name(tf.compat.v1, name)327 _, attr = tf_decorator.unwrap(attr)328 v1_names = tf_export.get_v1_names(attr)329 self.assertTrue(v1_names)330 v1_names = ["tf.%s" % n for n in v1_names]331 # check if any other name is in332 self.assertTrue(333 any(n in reordered_function_names for n in v1_names),334 removed_names_message % name)335 def testRenameConstant(self):336 text = 
"tf.MONOLITHIC_BUILD\n"337 _, unused_report, unused_errors, new_text = self._upgrade(text)338 self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")339 text = "some_call(tf.MONOLITHIC_BUILD)\n"340 _, unused_report, unused_errors, new_text = self._upgrade(text)341 self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")342 def testRenameArgs(self):343 text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "344 "dilation_rate_a, strides_a, name_a, data_format_a)\n")345 _, unused_report, unused_errors, new_text = self._upgrade(text)346 self.assertEqual(new_text,347 ("tf.nn.pool(input=input_a, window_shape=window_shape_a,"348 " pooling_type=pooling_type_a, padding=padding_a, "349 "dilations=dilation_rate_a, strides=strides_a, "350 "name=name_a, data_format=data_format_a)\n"))351 def testReorder(self):352 text = "tf.boolean_mask(a, b, c, d)\n"353 _, unused_report, unused_errors, new_text = self._upgrade(text)354 self.assertEqual(new_text,355 "tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")356 def testLearningRateDecay(self):357 for decay in ["tf.train.exponential_decay",358 "tf.train.polynomial_decay", "tf.train.natural_exp_decay",359 "tf.train.inverse_time_decay", "tf.train.cosine_decay",360 "tf.train.cosine_decay_restarts",361 "tf.train.linear_cosine_decay",362 "tf.train.noisy_linear_cosine_decay",363 "tf.train.piecewise_constant_decay",364 ]:365 text = "%s(a, b)\n" % decay366 _, report, unused_errors, _ = self._upgrade(text)367 self.assertIn("switch to the schedules in "368 "`tf.keras.optimizers.schedules`", report)369 def testMetrics(self):370 metrics = [371 "accuracy",372 "auc",373 "average_precision_at_k",374 "false_negatives",375 "false_negatives_at_thresholds",376 "false_positives",377 "false_positives_at_thresholds",378 "mean",379 "mean_absolute_error",380 "mean_cosine_distance",381 "mean_iou",382 "mean_per_class_accuracy",383 "mean_relative_error",384 "mean_squared_error",385 "mean_tensor",386 
"percentage_below",387 "precision",388 "precision_at_k",389 "precision_at_thresholds",390 "precision_at_top_k",391 "recall",392 "recall_at_k",393 "recall_at_thresholds",394 "recall_at_top_k",395 "root_mean_squared_error",396 "sensitivity_at_specificity",397 "sparse_average_precision_at_k",398 "sparse_precision_at_k",399 "specificity_at_sensitivity",400 "true_negatives",401 "true_negatives_at_thresholds",402 "true_positives",403 "true_positives_at_thresholds",404 ]405 for m in metrics:406 text = "tf.metrics." + m + "(a, b)"407 _, report, unused_errors, new_text = self._upgrade(text)408 self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)409 self.assertIn(410 "tf.metrics have been replaced with object oriented versions", report)411 def testLosses(self):412 losses = [413 "absolute_difference",414 "add_loss",415 "compute_weighted_loss",416 "cosine_distance",417 "get_losses",418 "get_regularization_loss",419 "get_regularization_losses",420 "get_total_loss",421 "hinge_loss",422 "huber_loss",423 "log_loss",424 "mean_pairwise_squared_error",425 "mean_squared_error",426 "sigmoid_cross_entropy",427 "softmax_cross_entropy",428 "sparse_softmax_cross_entropy",429 ]430 for l in losses:431 text = "tf.losses." + l + "(a, b)"432 _, report, unused_errors, new_text = self._upgrade(text)433 self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)434 self.assertIn(435 "tf.losses have been replaced with object oriented versions", report)436 def testEstimatorLossReductionChange(self):437 classes = [438 "LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",439 "DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",440 "BaselineClassifier", "BaselineRegressor"441 ]442 for c in classes:443 ns = "tf.estimator." 
+ c444 text = ns + "()"445 expected_text = ns + "(loss_reduction=tf.compat.v1.losses.Reduction.SUM)"446 _, report, errors, new_text = self._upgrade(text)447 self.assertEqual(expected_text, new_text)448 text = ns + "(loss_reduction=TEST)"449 expected_text = ns + "(loss_reduction=TEST)"450 _, report, errors, new_text = self._upgrade(text)451 self.assertEqual(text, new_text)452 text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"453 expected_text = (454 "tf.estimator.BaselineClassifier(" +455 "model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "456 "optimizer=o, config=c, loss_reduction=lr)")457 _, report, errors, new_text = self._upgrade(text)458 self.assertEqual(expected_text, new_text)459 text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"460 expected_text = ("tf.estimator.BaselineClassifier(" +461 "model_dir=model_dir, "462 "loss_reduction=tf.compat.v1.losses.Reduction.SUM)")463 _, report, errors, new_text = self._upgrade(text)464 self.assertEqual(expected_text, new_text)465 def testGetVariableWithUseResource(self):466 text = "tf.get_variable(name=\"a\")"467 expected_text = "tf.compat.v1.get_variable(name=\"a\", use_resource=False)"468 _, unused_report, unused_errors, new_text = self._upgrade(text)469 self.assertEqual(expected_text, new_text)470 text = "tf.get_variable(name=\"a\", use_resource=None)"471 expected_text = "tf.compat.v1.get_variable(name=\"a\", use_resource=None)"472 _, unused_report, unused_errors, new_text = self._upgrade(text)473 self.assertEqual(expected_text, new_text)474 def testExtractGlimpse(self):475 text = ("tf.image.extract_glimpse(x, size, off, False, "476 "False, False, name=\"foo\")\n")477 _, unused_report, unused_errors, new_text = self._upgrade(text)478 self.assertEqual(479 new_text,480 "tf.image.extract_glimpse(x, size, off, False, "481 "False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",482 )483 text = ("tf.image.extract_glimpse(x, size, off, centered=False, "484 "normalized=False, 
uniform_noise=True if uniform_noise else "485 "False, name=\"foo\")\n")486 _, unused_report, unused_errors, new_text = self._upgrade(text)487 self.assertEqual(488 new_text,489 "tf.image.extract_glimpse(x, size, off, centered=False, "490 "normalized=False, noise='uniform' if (True if uniform_noise else "491 "False) else 'gaussian', name=\"foo\")\n",492 )493 text = ("tf.image.extract_glimpse(x,\n"494 " size,\n"495 " off,\n"496 " centered=True,\n"497 " normalized=True, # Stuff before\n"498 " uniform_noise=False,\n"499 " name=\"foo\")# Stuff after\n")500 _, unused_report, unused_errors, new_text = self._upgrade(text)501 self.assertEqual(502 new_text, "tf.image.extract_glimpse(x,\n"503 " size,\n"504 " off,\n"505 " centered=True,\n"506 " normalized=True, # Stuff before\n"507 " noise='uniform' if (False) else 'gaussian',\n"508 " name=\"foo\")# Stuff after\n")509 text = "tf.image.extract_glimpse(x)\n"510 _, unused_report, errors, new_text = self._upgrade(text)511 self.assertEqual(new_text, text)512 self.assertEqual(errors, [])513 def testDropout(self):514 text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"515 _, unused_report, unused_errors, new_text = self._upgrade(text)516 self.assertEqual(517 new_text,518 "tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",519 )520 text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"521 _, unused_report, unused_errors, new_text = self._upgrade(text)522 self.assertEqual(523 new_text,524 "tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",525 )526 text = (527 "tf.nn.dropout(x, # Stuff before\n"528 " keep_prob=.4, # Stuff after\n"529 " name=\"foo\")\n"530 )531 _, unused_report, unused_errors, new_text = self._upgrade(text)532 self.assertEqual(533 new_text,534 "tf.nn.dropout(x, # Stuff before\n"535 " rate=1 - (.4), # Stuff after\n"536 " name=\"foo\")\n",537 )538 text = "tf.nn.dropout(x)\n"539 _, unused_report, errors, new_text = self._upgrade(text)540 self.assertEqual(new_text, text)541 self.assertIn("tf.nn.dropout called without 
arguments", errors[0])542 def testDropoutExpr(self):543 text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"544 _, unused_report, unused_errors, new_text = self._upgrade(text)545 self.assertEqual(546 new_text,547 "tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",548 )549 def testMathCountNonZeroChanges(self):550 text = (551 "tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "552 "reduction_indices=axis, keep_dims=keepdims)\n"553 )554 _, unused_report, unused_errors, new_text = self._upgrade(text)555 expected_text = (556 "tf.math.count_nonzero(input=input, dtype=dtype, name=name, "557 "axis=axis, keepdims=keepdims)\n"558 )559 self.assertEqual(new_text, expected_text)560 def testCountNonZeroChanges(self):561 text = (562 "tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "563 "reduction_indices=axis, keep_dims=keepdims)\n"564 )565 _, unused_report, unused_errors, new_text = self._upgrade(text)566 expected_text = (567 "tf.math.count_nonzero(input=input, dtype=dtype, name=name, "568 "axis=axis, keepdims=keepdims)\n"569 )570 self.assertEqual(new_text, expected_text)571 def testRandomMultinomialToRandomCategorical(self):572 text = (573 "tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"574 )575 _, unused_report, unused_errors, new_text = self._upgrade(text)576 expected_text = (577 "tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "578 "name=name, dtype=output_dtype)\n"579 )580 self.assertEqual(new_text, expected_text)581 text = (582 "tf.multinomial(logits, samples, seed, name, output_dtype)\n"583 )584 _, unused_report, unused_errors, new_text = self._upgrade(text)585 expected_text = (586 "tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "587 "name=name, dtype=output_dtype)\n"588 )589 self.assertEqual(new_text, expected_text)590 def testRandomPoissonConversion(self):591 text1 = "tf.random_poisson(lam, shape, dtype)"592 text2 = "tf.random.poisson(lam, shape, dtype)"593 
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"594 _, unused_report, unused_errors, new_text1 = self._upgrade(text1)595 self.assertEqual(new_text1, expected_text)596 _, unused_report, unused_errors, new_text2 = self._upgrade(text2)597 self.assertEqual(new_text2, expected_text)598 def testConvolutionOpUpdate(self):599 text = (600 "tf.nn.convolution(input, filter, padding, strides, dilation_rate, "601 "name, data_format)"602 )603 _, unused_report, unused_errors, new_text = self._upgrade(text)604 expected_text = (605 "tf.nn.convolution(input=input, filters=filter, padding=padding, "606 "strides=strides, dilations=dilation_rate, name=name, "607 "data_format=data_format)"608 )609 self.assertEqual(new_text, expected_text)610 def test_substr(self):611 text = "tf.substr(input, pos, len, name, unit)\n"612 _, unused_report, errors, new_text = self._upgrade(text)613 self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "614 "name=name, unit=unit)\n", new_text)615 self.assertEqual(errors, [])616 def testColocateGradientsWithOps(self):617 text = "tf.gradients(yx=a, foo=False)\n"618 _, unused_report, errors, new_text = self._upgrade(text)619 self.assertEqual(text, new_text)620 self.assertEqual(errors, [])621 text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"622 _, report, unused_errors, new_text = self._upgrade(text)623 self.assertEqual("tf.gradients(yx=a)\n", new_text)624 self.assertIn("tf.gradients no longer takes", report)625 text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"626 expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "627 "gate_gradients=gate)\n")628 _, unused_report, errors, new_text = self._upgrade(text)629 self.assertEqual(expected, new_text)630 def testColocateGradientsWithOpsMinimize(self):631 text = "optimizer.minimize(a, foo=False)\n"632 _, unused_report, errors, new_text = self._upgrade(text)633 self.assertEqual(text, new_text)634 self.assertEqual(errors, [])635 text = 
"optimizer.minimize(a, colocate_gradients_with_ops=False)\n"636 _, report, unused_errors, new_text = self._upgrade(text)637 self.assertEqual("optimizer.minimize(a)\n", new_text)638 self.assertIn("Optimizer.minimize no longer takes", report)639 def testColocateGradientsWithOpsComputeGradients(self):640 text = "optimizer.compute_gradients(a, foo=False)\n"641 _, unused_report, errors, new_text = self._upgrade(text)642 self.assertEqual(text, new_text)643 self.assertEqual(errors, [])644 text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"645 _, report, unused_errors, new_text = self._upgrade(text)646 self.assertEqual("optimizer.compute_gradients(a)\n", new_text)647 self.assertIn("Optimizer.compute_gradients no longer takes", report)648 def testExportSavedModelRename(self):649 text = "self.est.export_savedmodel(path)"650 _, report, unused_errors, unused_new_text = self._upgrade(text)651 self.assertIn(652 "rename the method export_savedmodel() to export_saved_model()",653 report)654 def testArgmin(self):655 text = "tf.argmin(input, name=n, dimension=1, output_type=type)"656 expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"657 _, unused_report, unused_errors, new_text = self._upgrade(text)658 self.assertEqual(new_text, expected_text)659 text = "tf.argmin(input, 0)"660 expected_text = "tf.argmin(input=input, axis=0)"661 _, unused_report, unused_errors, new_text = self._upgrade(text)662 self.assertEqual(new_text, expected_text)663 text = "tf.arg_min(input, 0)"664 expected_text = "tf.argmin(input, 0)"665 _, unused_report, unused_errors, new_text = self._upgrade(text)666 self.assertEqual(new_text, expected_text)667 def testArgmax(self):668 text = "tf.argmax(input, name=n, dimension=1, output_type=type)"669 expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"670 _, unused_report, unused_errors, new_text = self._upgrade(text)671 self.assertEqual(new_text, expected_text)672 text = "tf.argmax(input, 0)"673 
expected_text = "tf.argmax(input=input, axis=0)"674 _, unused_report, unused_errors, new_text = self._upgrade(text)675 self.assertEqual(new_text, expected_text)676 text = "tf.arg_max(input, 0)"677 expected_text = "tf.argmax(input, 0)"678 _, unused_report, unused_errors, new_text = self._upgrade(text)679 self.assertEqual(new_text, expected_text)680 def testEstimatorInputs(self):681 text = "tf.estimator.inputs.numpy_input_fn(0)"682 expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"683 _, unused_report, unused_errors, new_text = self._upgrade(text)684 self.assertEqual(new_text, expected_text)685 text = "tf.estimator.inputs.pandas_input_fn(0)"686 expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"687 _, unused_report, unused_errors, new_text = self._upgrade(text)688 self.assertEqual(new_text, expected_text)689 def testBatchToSpace(self):690 text = "tf.batch_to_space_nd(input, block_shape, crops, name)"691 expected_text = "tf.batch_to_space(input, block_shape, crops, name)"692 _, unused_report, unused_errors, new_text = self._upgrade(text)693 self.assertEqual(new_text, expected_text)694 text = "tf.batch_to_space(input, crops, block_size, name)"695 expected_text = (696 "tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "697 "name=name)")698 _, unused_report, unused_errors, new_text = self._upgrade(text)699 self.assertEqual(new_text, expected_text)700 text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"701 expected_text = "tf.batch_to_space(input, block_shape, crops, name)"702 _, unused_report, unused_errors, new_text = self._upgrade(text)703 self.assertEqual(new_text, expected_text)704 def testExtractImagePatches(self):705 text = (706 "tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"707 "rates=rates, padding=padding, name=name)")708 expected_text = (709 "tf.image.extract_image_patches(images, sizes=ksizes, strides=strides,"710 "rates=rates, padding=padding, name=name)")711 _, unused_report, 
unused_errors, new_text = self._upgrade(text)712 self.assertEqual(new_text, expected_text)713 def testKerasSavedModel(self):714 text = (715 "tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"716 "tf.contrib.saved_model.load_keras_model(saved_model_path)\n")717 expected_text = (718 "tf.keras.experimental.export_saved_model(model, './saved_models')\n"719 "tf.keras.experimental.load_from_saved_model(saved_model_path)\n")720 _, unused_report, unused_errors, new_text = self._upgrade(text)721 self.assertEqual(new_text, expected_text)722 def testStatelessMultinomial(self):723 text = (724 "tf.random.stateless_multinomial(logits, num_samples, seed, "725 "output_dtype=dtype, name=name)")726 expected_text = (727 "tf.random.stateless_categorical(logits, num_samples, seed, "728 "dtype=dtype, name=name)")729 _, unused_report, unused_errors, new_text = self._upgrade(text)730 self.assertEqual(new_text, expected_text)731 def testSoftMaxCrossEntropyWithLogitsV2(self):732 text = (733 "tf.nn.softmax_cross_entropy_with_logits_v2("734 "labels=labels, logits=logits, dim=2)")735 expected_text = (736 "tf.nn.softmax_cross_entropy_with_logits("737 "labels=labels, logits=logits, axis=2)")738 _, unused_report, errors, new_text = self._upgrade(text)739 self.assertEqual(new_text, expected_text)740 self.assertFalse(errors)741 def testSoftMaxCrossEntropyWithLogits(self):742 text = ("tf.nn.softmax_cross_entropy_with_logits("743 "labels=labels, logits=logits, dim=2)")744 expected_text = (745 "tf.nn.softmax_cross_entropy_with_logits("746 "labels=tf.stop_gradient(labels), logits=logits, axis=2)")747 _, unused_report, unused_errors, new_text = self._upgrade(text)748 self.assertEqual(new_text, expected_text)749 text = ("tf.nn.softmax_cross_entropy_with_logits("750 "labels=foo(bar))")751 expected_text = ("tf.nn.softmax_cross_entropy_with_logits("752 "labels=tf.stop_gradient(foo(bar)))")753 _, unused_report, unused_errors, new_text = self._upgrade(text)754 self.assertEqual(expected_text, 
new_text)755 def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):756 text = ("tf.nn.softmax_cross_entropy_with_logits("757 "labels=tf.stop_gradient(labels), logits=logits, dim=2)")758 expected_text = (759 "tf.nn.softmax_cross_entropy_with_logits("760 "labels=tf.stop_gradient(labels), logits=logits, axis=2)")761 _, unused_report, unused_errors, new_text = self._upgrade(text)762 self.assertEqual(new_text, expected_text)763 text = ("tf.nn.softmax_cross_entropy_with_logits("764 "labels=tf.stop_gradient(foo(bar)))")765 expected_text = ("tf.nn.softmax_cross_entropy_with_logits("766 "labels=tf.stop_gradient(foo(bar)))")767 _, unused_report, unused_errors, new_text = self._upgrade(text)768 self.assertEqual(expected_text, new_text)769 text = ("tf.nn.softmax_cross_entropy_with_logits("770 "labels=foo())")771 expected_text = ("tf.nn.softmax_cross_entropy_with_logits("772 "labels=tf.stop_gradient(foo()))")773 _, unused_report, unused_errors, new_text = self._upgrade(text)774 self.assertEqual(expected_text, new_text)775 text = ("tf.nn.softmax_cross_entropy_with_logits("776 "labels=foo().zz())")777 expected_text = ("tf.nn.softmax_cross_entropy_with_logits("778 "labels=tf.stop_gradient(foo().zz()))")779 _, unused_report, unused_errors, new_text = self._upgrade(text)780 self.assertEqual(expected_text, new_text)781 def testSparseMatmul(self):782 text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")783 expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "784 "a_is_sparse=e, b_is_sparse=f, name=g)\n")785 _, unused_report, unused_errors, new_text = self._upgrade(text)786 self.assertEqual(new_text, expected_text)787 def testWeightedMoments(self):788 text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"789 expected_text = (790 "tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "791 "name=name, keepdims=kd)")792 _, unused_report, unused_errors, new_text = self._upgrade(text)793 self.assertEqual(new_text, expected_text)794 def testSparseAdd(self):795 
text = "tf.sparse.add(a, b, t)"796 expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"797 _, unused_report, unused_errors, new_text = self._upgrade(text)798 self.assertEqual(new_text, expected_text)799 def testSparseConcat(self):800 text = "tf.sparse.concat(ax, inp, name, exp, concat)"801 expected_text = (802 "tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "803 "expand_nonconcat_dims=exp, axis=concat)")804 _, unused_report, unused_errors, new_text = self._upgrade(text)805 self.assertEqual(new_text, expected_text)806 def testSeparableConv2D(self):807 text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"808 expected_text = (809 "tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "810 "pointwise_filter=pt, strides=strides, padding=pad, "811 "dilations=rate, name=name, data_format=fmt)")812 _, unused_report, unused_errors, new_text = self._upgrade(text)813 self.assertEqual(new_text, expected_text)814 def testConv2D(self):815 text = (816 "tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "817 "data_format)")818 expected_text = (819 "tf.nn.conv2d(input=input, filters=filter, strides=strides, "820 "padding=padding, data_format=data_format)")821 _, unused_report, unused_errors, new_text = self._upgrade(text)822 self.assertEqual(new_text, expected_text)823 text = (824 "tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "825 "use_cudnn_on_gpu=use_cudnn_on_gpu)")826 expected_text = ("tf.nn.conv2d(input=input, filters=filter, "827 "strides=strides, padding=padding)")828 _, unused_report, unused_errors, new_text = self._upgrade(text)829 self.assertEqual(new_text, expected_text)830 def testConv2DBackpropFilter(self):831 text = (832 "tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "833 "strides, padding, use_cudnn_on_gpu, data_format)")834 expected_text = (835 "tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "836 "out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")837 _, 
unused_report, unused_errors, new_text = self._upgrade(text)838 self.assertEqual(new_text, expected_text)839 def testConv2DBackpropInput(self):840 text = (841 "tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "842 "strides, padding, use_cudnn_on_gpu, data_format)")843 expected_text = (844 "tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "845 "input=out_backprop, strides=strides, padding=padding, "846 "data_format=data_format)")847 _, unused_report, unused_errors, new_text = self._upgrade(text)848 self.assertEqual(new_text, expected_text)849 def testSpacetoBatch(self):850 text = "tf.space_to_batch_nd(input, shape, paddings, name)"851 expected_text = "tf.space_to_batch(input, shape, paddings, name)"852 _, unused_report, unused_errors, new_text = self._upgrade(text)853 self.assertEqual(new_text, expected_text)854 text = "tf.nn.space_to_batch(input, paddings, block_size, name)"855 expected_text = (856 "tf.space_to_batch(input=input, paddings=paddings, "857 "block_shape=block_size, name=name)")858 _, unused_report, unused_errors, new_text = self._upgrade(text)859 self.assertEqual(new_text, expected_text)860 def testInTopK(self):861 text = "tf.math.in_top_k(a, b, c, n)"862 expected_text = (863 "tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")864 _, unused_report, unused_errors, new_text = self._upgrade(text)865 self.assertEqual(new_text, expected_text)866 def testDepthToSpace(self):867 text = "tf.nn.depth_to_space(input, block_size, name, data_format)"868 expected_text = (869 "tf.nn.depth_to_space(input=input, block_size=block_size, "870 "name=name, data_format=data_format)")871 _, unused_report, unused_errors, new_text = self._upgrade(text)872 self.assertEqual(new_text, expected_text)873 def testEmbeddingLookup(self):874 text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "875 "validate_indices, max_norm)")876 expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "877 
"partition_strategy=partition_strategy, name=name, "878 "max_norm=max_norm)")879 _, unused_report, unused_errors, new_text = self._upgrade(text)880 self.assertEqual(new_text, expected_text)881 def testEmbeddingLookupSparse(self):882 text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "883 "partition_strategy, name, combiner, max_norm)")884 expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "885 "sp_ids=sp_ids, sp_weights=sp_weights, "886 "partition_strategy=partition_strategy, name=name, "887 "combiner=combiner, max_norm=max_norm)")888 _, unused_report, unused_errors, new_text = self._upgrade(text)889 self.assertEqual(new_text, expected_text)890 def testNnInTopK(self):891 text = "tf.nn.in_top_k(predictions, targets, k, name)"892 expected_text = ("tf.nn.in_top_k(predictions=predictions, "893 "targets=targets, k=k, name=name)")894 _, unused_report, unused_errors, new_text = self._upgrade(text)895 self.assertEqual(new_text, expected_text)896 def testSpaceToDepth(self):897 text = "tf.nn.space_to_depth(input, block_size, name, data_format)"898 expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "899 "name=name, data_format=data_format)")900 _, unused_report, unused_errors, new_text = self._upgrade(text)901 self.assertEqual(new_text, expected_text)902 def testPrint(self):903 # tf.print() cannot be parsed unless we import print_function904 text = """from __future__ import print_function905tf.print()906tf.print('abc')907"""908 _, unused_report, unused_errors, new_text = self._upgrade(text)909 self.assertEqual(new_text, text) # Text should stay the same910 def testSparseSplit(self):911 text = (912 "tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "913 "name=name)")914 expected_text = (915 "tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "916 "name=name)")917 _, unused_report, unused_errors, new_text = self._upgrade(text)918 self.assertEqual(new_text, expected_text)919 text = (920 
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "921 "name=name, split_dim=axis)")922 expected_text = (923 "tf.sparse.split(sp_input=sp_input, num_split=num_split, "924 "name=name, axis=axis)")925 _, unused_report, unused_errors, new_text = self._upgrade(text)926 self.assertEqual(new_text, expected_text)927 text = (928 "tf.sparse.split(sp_input=sp_input, num_split=num_split, "929 "name=name, split_dim=axis)")930 expected_text = (931 "tf.sparse.split(sp_input=sp_input, num_split=num_split, "932 "name=name, axis=axis)")933 _, unused_report, unused_errors, new_text = self._upgrade(text)934 self.assertEqual(new_text, expected_text)935 def testIterators(self):936 for (text, expected) in [937 ("(expr + yielding(data)).make_one_shot_iterator()",938 "tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),939 ("dataset.make_one_shot_iterator()",940 "tf.compat.v1.data.make_one_shot_iterator(dataset)"),941 ("dataset.make_one_shot_iterator(shared_name=foo)",942 "tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),943 ("dataset.make_one_shot_iterator(x, y, z)",944 "tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),945 ("dataset.make_initializable_iterator()",946 "tf.compat.v1.data.make_initializable_iterator(dataset)"),947 ("ds.make_initializable_iterator(shared_name=foo)",948 "tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),949 ("dataset.make_initializable_iterator(x, y, z)",950 "tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),951 ("tf.data.make_one_shot_iterator(dataset)",952 "tf.compat.v1.data.make_one_shot_iterator(dataset)"),953 ("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",954 "tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),955 ("tf.data.make_one_shot_iterator(dataset, x, y, z)",956 "tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),957 ("tf.data.make_initializable_iterator(dataset)",958 
"tf.compat.v1.data.make_initializable_iterator(dataset)"),959 ("tf.data.make_initializable_iterator(ds, shared_name=foo)",960 "tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),961 ("tf.data.make_initializable_iterator(dataset, x, y, z)",962 "tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),963 ("tf.compat.v1.data.make_one_shot_iterator(dataset)",964 "tf.compat.v1.data.make_one_shot_iterator(dataset)"),965 ("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",966 "tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),967 ("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",968 "tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),969 ("tf.compat.v1.data.make_initializable_iterator(dataset)",970 "tf.compat.v1.data.make_initializable_iterator(dataset)"),971 ("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",972 "tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),973 ("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",974 "tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:975 _, unused_report, unused_errors, actual = self._upgrade(text)976 self.assertEqual(actual, expected)977 def testMapAndBatch(self):978 suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"979 text = "tf" + suffix980 expected = "tf.compat.v1" + suffix981 _, unused_report, unused_errors, actual = self._upgrade(text)982 self.assertEqual(actual, expected)983 def testCast(self):984 for (name, dtype) in [("int32", "int32"),985 ("int64", "int64"),986 ("float", "float32"),987 ("double", "float64"),988 ("complex64", "complex64"),989 ("complex128", "complex128"),990 ("bfloat16", "bfloat16")]:991 text = "tf.to_%s(x, name='test')" % name992 expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype993 _, unused_report, unused_errors, new_text = self._upgrade(text)994 self.assertEqual(expected_text, new_text)995 def 
testCastPositionalSecondArgument(self):996 for (name, dtype) in [("int32", "int32"),997 ("int64", "int64"),998 ("float", "float32"),999 ("double", "float64"),1000 ("complex64", "complex64"),1001 ("complex128", "complex128"),1002 ("bfloat16", "bfloat16")]:1003 text = "tf.to_%s(x, 'test')" % name1004 expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype1005 _, unused_report, unused_errors, new_text = self._upgrade(text)1006 self.assertEqual(expected_text, new_text)1007 def testImageResize(self):1008 for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:1009 text = "tf.image.resize_%s(i, s)" % method1010 expected_text = ("tf.image.resize(i, s, "1011 "method=tf.image.ResizeMethod.%s)" % method.upper())1012 _, unused_report, unused_errors, new_text = self._upgrade(text)1013 self.assertEqual(expected_text, new_text)1014 def testImageResizeExtraPositionalArgs(self):1015 for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:1016 text = "tf.image.resize_%s(i, s, a, p)" % method1017 expected_text = [1018 "tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",1019 "method=tf.image.ResizeMethod.%s)" % method.upper()1020 ]1021 _, unused_report, unused_errors, new_text = self._upgrade(text)1022 for s in expected_text:1023 self.assertIn(s, new_text)1024 def testCond(self):1025 text = "tf.cond(a, b, c, True)"1026 expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"1027 _, unused_report, errors, new_text = self._upgrade(text)1028 self.assertEqual(expected_text, new_text)1029 self.assertIn("tf.cond", errors[0])1030 self.assertIn("requires manual check", errors[0])1031 def testParens(self):1032 text = """1033def _log_prob(self, x):1034 return tf.reduce_logsumexp(1035 (self.mixture_distribution.logits + self.distribution.log_prob(1036 x[..., tf.newaxis])),1037 axis=-1)"""1038 expected_text = """1039def _log_prob(self, x):1040 return tf.reduce_logsumexp(1041 input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(1042 x[..., 
tf.newaxis])),1043 axis=-1)"""1044 _, unused_report, unused_errors, new_text = self._upgrade(text)1045 self.assertEqual(expected_text, new_text)1046 def testAssertStatements(self):1047 for name in ["assert_greater", "assert_equal", "assert_none_equal",1048 "assert_less", "assert_negative", "assert_positive",1049 "assert_non_negative", "assert_non_positive", "assert_near",1050 "assert_less", "assert_less_equal", "assert_greater",1051 "assert_greater_equal", "assert_integer", "assert_type",1052 "assert_scalar"]:1053 text = "tf.%s(a)" % name1054 expected_text = "tf.compat.v1.%s(a)" % name1055 _, report, unused_errors, new_text = self._upgrade(text)1056 self.assertEqual(expected_text, new_text)1057 self.assertIn("%s has been" % name, report)1058 text = "tf.debugging.%s(a)" % name1059 expected_text = "tf.compat.v1.debugging.%s(a)" % name1060 _, report, unused_errors, new_text = self._upgrade(text)1061 self.assertEqual(expected_text, new_text)1062 self.assertIn("%s has been" % name, report)1063 def testAssertRankStatements(self):1064 for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:1065 text = "tf.%s(a)" % name1066 expected_text = "tf.compat.v1.%s(a)" % name1067 _, report, unused_errors, new_text = self._upgrade(text)1068 self.assertEqual(expected_text, new_text)1069 self.assertIn("%s has been" % name, report)1070 text = "tf.debugging.%s(a)" % name1071 expected_text = "tf.compat.v1.debugging.%s(a)" % name1072 _, report, unused_errors, new_text = self._upgrade(text)1073 self.assertEqual(expected_text, new_text)1074 self.assertIn("%s has been" % name, report)1075 def test_assert_equal_graph_def(self):1076 text = "tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x)"1077 expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"1078 _, _, _, new_text = self._upgrade(text)1079 self.assertEqual(expected, new_text)1080 def test_is_tensor_upgrade(self):1081 text = "tf.contrib.framework.is_tensor(x)"1082 expected = "tf.is_tensor(x)"1083 _, _, _, 
new_text = self._upgrade(text)1084 self.assertEqual(expected, new_text)1085 def test_CriticalSection_upgrade(self):1086 text = "tf.contrib.framework.CriticalSection(shared_name='blah')"1087 expected = "tf.CriticalSection(shared_name='blah')"1088 _, _, _, new_text = self._upgrade(text)1089 self.assertEqual(expected, new_text)1090 def test_sample_distorted_bounding_box(self):1091 # pylint: disable=line-too-long1092 text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"1093 expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"1094 # pylint: enable=line-too-long1095 _, _, _, new_text = self._upgrade(text)1096 self.assertEqual(expected, new_text)1097 def test_contrib_initialize(self):1098 text = "tf.contrib.summary.initialize"1099 expected = "tf.compat.v1.summary.initialize"1100 _, _, _, new_text = self._upgrade(text)1101 self.assertEqual(expected, new_text)1102 def test_contrib_framework_argsort(self):1103 text = "tf.contrib.framework.argsort"1104 expected = "tf.argsort"1105 # pylint: enable=line-too-long1106 _, _, _, new_text = self._upgrade(text)1107 self.assertEqual(expected, new_text)1108 def test_contrib_rnn_cell(self):1109 text = "tf.contrib.rnn.RNNCell"1110 expected = "tf.compat.v1.nn.rnn_cell.RNNCell"1111 # pylint: enable=line-too-long1112 _, _, _, new_text = self._upgrade(text)1113 self.assertEqual(expected, new_text)1114 def test_flags_bare(self):1115 _, _, errors, _ = self._upgrade("tf.flags")1116 self.assertIn("tf.flags has been removed", errors[0])1117 def test_flags_flags(self):1118 _, _, errors, _ = self._upgrade("tf.flags.FLAGS")1119 self.assertIn("tf.flags has been removed", errors[0])1120 def test_max_pool_2d(self):1121 text = "tf.nn.max_pool(value=4)"1122 expected_text = "tf.nn.max_pool2d(input=4)"1123 _, _, _, new_text = self._upgrade(text)1124 self.assertEqual(expected_text, 
new_text)1125 def test_contrib_estimator_early_stopping(self):1126 api_symbols = [1127 "make_early_stopping_hook", "stop_if_higher_hook", "stop_if_lower_hook",1128 "stop_if_no_decrease_hook", "stop_if_no_increase_hook"1129 ]1130 for symbol in api_symbols:1131 text = "tf.contrib.estimator." + symbol1132 expected_text = "tf.estimator.experimental." + symbol1133 _, _, _, new_text = self._upgrade(text)1134 self.assertEqual(expected_text, new_text)1135 def test_contrib_rnn(self):1136 api_symbols = ["BasicLSTMCell", "BasicRNNCell", "GRUCell", "LSTMCell",1137 "MultiRNNCell"]1138 for symbol in api_symbols:1139 text = "tf.contrib.rnn." + symbol1140 expected_text = "tf.compat.v1.nn.rnn_cell." + symbol1141 _, _, _, new_text = self._upgrade(text)1142 self.assertEqual(expected_text, new_text)1143 def test_contrib_summary_audio(self):1144 text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"1145 expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "1146 "sample_rate=44100, max_outputs=3, step=42)")1147 _, _, errors, new_text = self._upgrade(text)1148 self.assertEqual(expected, new_text)1149 self.assertIn("'family' argument", errors[0])1150 self.assertIn("Manual check required", errors[1])1151 def test_contrib_summary_histogram(self):1152 text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"1153 expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "1154 "step=42)")1155 _, _, errors, new_text = self._upgrade(text)1156 self.assertEqual(expected, new_text)1157 self.assertIn("'family' argument", errors[0])1158 self.assertIn("Manual check required", errors[1])1159 def test_contrib_summary_image(self):1160 text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"1161 expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "1162 "max_outputs=3, step=42)")1163 _, _, errors, new_text = self._upgrade(text)1164 self.assertEqual(expected, new_text)1165 self.assertIn("'bad_color' argument", errors[0])1166 
self.assertIn("'family' argument", errors[1])1167 self.assertIn("Manual check required", errors[2])1168 def test_contrib_summary_scalar(self):1169 text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"1170 expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "1171 "step=42)")1172 _, _, errors, new_text = self._upgrade(text)1173 self.assertEqual(expected, new_text)1174 self.assertIn("'family' argument", errors[0])1175 self.assertIn("Manual check required", errors[1])1176 def test_contrib_summary_audio_nostep(self):1177 text = "tf.contrib.summary.audio('foo', myval, 44100)"1178 expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "1179 "sample_rate=44100, "1180 "step=tf.compat.v1.train.get_or_create_global_step())")1181 _, _, errors, new_text = self._upgrade(text)1182 self.assertEqual(expected, new_text)1183 self.assertIn("'step' argument", errors[0])1184 self.assertIn("Manual check required", errors[1])1185 def test_contrib_summary_histogram_nostep(self):1186 text = "tf.contrib.summary.histogram('foo', myval)"1187 expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "1188 "step=tf.compat.v1.train.get_or_create_global_step())")1189 _, _, errors, new_text = self._upgrade(text)1190 self.assertEqual(expected, new_text)1191 self.assertIn("'step' argument", errors[0])1192 self.assertIn("Manual check required", errors[1])1193 def test_contrib_summary_image_nostep(self):1194 text = "tf.contrib.summary.image('foo', myval)"1195 expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "1196 "step=tf.compat.v1.train.get_or_create_global_step())")1197 _, _, errors, new_text = self._upgrade(text)1198 self.assertEqual(expected, new_text)1199 self.assertIn("'step' argument", errors[0])1200 self.assertIn("Manual check required", errors[1])1201 def test_contrib_summary_scalar_nostep(self):1202 text = "tf.contrib.summary.scalar('foo', myval)"1203 expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "1204 
"step=tf.compat.v1.train.get_or_create_global_step())")1205 _, _, errors, new_text = self._upgrade(text)1206 self.assertEqual(expected, new_text)1207 self.assertIn("'step' argument", errors[0])1208 self.assertIn("Manual check required", errors[1])1209 def test_avg_pool_2d(self):1210 text = "tf.nn.avg_pool(value=4)"1211 expected_text = "tf.nn.avg_pool2d(input=4)"1212 _, _, _, new_text = self._upgrade(text)1213 self.assertEqual(expected_text, new_text)1214 def test_saved_model_load(self):1215 text = "tf.saved_model.load(sess, ['foo_graph'])"1216 expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"1217 _, _, _, new_text = self._upgrade(text)1218 self.assertEqual(expected, new_text)1219 def test_saved_model_load_v2(self):1220 text = "tf.saved_model.load_v2('/tmp/blah')"1221 expected = "tf.compat.v2.saved_model.load('/tmp/blah')"1222 _, _, _, new_text = self._upgrade(text)1223 self.assertEqual(expected, new_text)1224 def test_uniform_unit_scaling_initializer(self):1225 text = "tf.uniform_unit_scaling_initializer(0.5)"1226 expected_text = (1227 "tf.keras.initializers.VarianceScaling(" +1228 "scale=0.5, distribution=\"uniform\")")1229 _, _, _, new_text = self._upgrade(text)1230 self.assertEqual(expected_text, new_text)1231 text = "tf.initializers.uniform_unit_scaling(0.5)"1232 expected_text = (1233 "tf.keras.initializers.VarianceScaling(" +1234 "scale=0.5, distribution=\"uniform\")")1235 _, _, _, new_text = self._upgrade(text)1236 self.assertEqual(expected_text, new_text)1237 def test_name_scope(self):1238 text = "tf.name_scope(None, default_name, [some, values])"1239 expected_text = "tf.name_scope(name=default_name)"1240 _, _, _, new_text = self._upgrade(text)1241 self.assertEqual(expected_text, new_text)1242 text = "tf.name_scope(default_name=default_name, values=stuff)"1243 expected_text = "tf.name_scope(name=default_name)"1244 _, _, _, new_text = self._upgrade(text)1245 self.assertEqual(expected_text, new_text)1246 text = "tf.name_scope(name=n, 
default_name=d, values=s)"1247 expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"1248 _, report, _, new_text = self._upgrade(text)1249 self.assertEqual(expected_text, new_text)1250 self.assertIn("`name` passed to `name_scope`", report)1251 text = "tf.name_scope(name=None, values=stuff)"1252 _, _, errors, _ = self._upgrade(text)1253 self.assertIn("name_scope call with neither name nor default_name",1254 errors[0])1255class TestUpgradeFiles(test_util.TensorFlowTestCase):1256 def testInplace(self):1257 """Check to make sure we don't have a file system race."""1258 temp_file = tempfile.NamedTemporaryFile("w", delete=False)1259 original = "tf.conj(a)\n"1260 upgraded = "tf.math.conj(a)\n"1261 temp_file.write(original)1262 temp_file.close()1263 upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())1264 upgrader.process_file(temp_file.name, temp_file.name)1265 self.assertAllEqual(open(temp_file.name).read(), upgraded)1266 os.unlink(temp_file.name)...

Full Screen

Full Screen

tf_upgrade_v2_safety_test.py

Source:tf_upgrade_v2_safety_test.py Github

copy

Full Screen

...21from tensorflow.python.platform import test as test_lib22from tensorflow.tools.compatibility import ast_edits23from tensorflow.tools.compatibility import tf_upgrade_v2_safety24class TfUpgradeV2SafetyTest(test_util.TensorFlowTestCase):25 def _upgrade(self, old_file_text):26 in_file = six.StringIO(old_file_text)27 out_file = six.StringIO()28 upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2_safety.TFAPIChangeSpec())29 count, report, errors = (30 upgrader.process_opened_file("test.py", in_file,31 "test_out.py", out_file))32 return count, report, errors, out_file.getvalue()33 def testContribWarning(self):34 text = "tf.contrib.foo()"35 _, report, _, _ = self._upgrade(text)36 expected_info = "tf.contrib will not be distributed"37 self.assertIn(expected_info, report)38 def testTensorFlowImport(self):39 text = "import tensorflow as tf"40 expected_text = ("import tensorflow.compat.v1 as tf")41 _, _, _, new_text = self._upgrade(text)42 self.assertEqual(expected_text, new_text)43 text = "import tensorflow as tf, other_import as y"44 expected_text = ("import tensorflow.compat.v1 as tf, other_import as y")45 _, _, _, new_text = self._upgrade(text)46 self.assertEqual(expected_text, new_text)47 text = "import tensorflow"48 expected_text = ("import tensorflow.compat.v1 as tensorflow")49 _, _, _, new_text = self._upgrade(text)50 self.assertEqual(expected_text, new_text)51 text = "import tensorflow.foo"52 expected_text = "import tensorflow.compat.v1.foo"53 _, _, _, new_text = self._upgrade(text)54 self.assertEqual(expected_text, new_text)55 text = "import tensorflow.foo as bar"56 expected_text = "import tensorflow.compat.v1.foo as bar"57 _, _, _, new_text = self._upgrade(text)58 self.assertEqual(expected_text, new_text)59 def testTensorFlowGoogleImport(self):60 text = "import tensorflow.google as tf"61 expected_text = "import tensorflow.google.compat.v1 as tf"62 _, _, _, new_text = self._upgrade(text)63 self.assertEqual(expected_text, new_text)64 text = "import 
tensorflow.google"65 expected_text = "import tensorflow.google.compat.v1"66 _, _, _, new_text = self._upgrade(text)67 self.assertEqual(expected_text, new_text)68 text = "import tensorflow.google.compat.v1 as tf"69 expected_text = "import tensorflow.google.compat.v1 as tf"70 _, _, _, new_text = self._upgrade(text)71 self.assertEqual(expected_text, new_text)72 text = "import tensorflow.google.compat.v2 as tf"73 expected_text = "import tensorflow.google.compat.v2 as tf"74 _, _, _, new_text = self._upgrade(text)75 self.assertEqual(expected_text, new_text)76 def testTensorFlowImportInIndent(self):77 text = """78try:79 import tensorflow as tf # import line80 tf.ones([4, 5])81except AttributeError:82 pass83"""84 expected_text = """85try:86 import tensorflow.compat.v1 as tf # import line87 tf.ones([4, 5])88except AttributeError:89 pass90"""91 _, _, _, new_text = self._upgrade(text)92 self.assertEqual(expected_text, new_text)93 def testTensorFlowFromImport(self):94 text = "from tensorflow import foo"95 expected_text = "from tensorflow.compat.v1 import foo"96 _, _, _, new_text = self._upgrade(text)97 self.assertEqual(expected_text, new_text)98 text = "from tensorflow.foo import bar"99 expected_text = "from tensorflow.compat.v1.foo import bar"100 _, _, _, new_text = self._upgrade(text)101 self.assertEqual(expected_text, new_text)102 text = "from tensorflow import *"103 expected_text = "from tensorflow.compat.v1 import *"104 _, _, _, new_text = self._upgrade(text)105 self.assertEqual(expected_text, new_text)106 def testTensorFlowImportAlreadyHasCompat(self):107 text = "import tensorflow.compat.v1 as tf"108 _, _, _, new_text = self._upgrade(text)109 self.assertEqual(text, new_text)110 text = "import tensorflow.compat.v2 as tf"111 _, _, _, new_text = self._upgrade(text)112 self.assertEqual(text, new_text)113 text = "from tensorflow.compat import v2 as tf"114 _, _, _, new_text = self._upgrade(text)115 self.assertEqual(text, new_text)116 def 
testTensorFlowGoogleFromImport(self):117 text = "from tensorflow.google.compat import v1 as tf"118 _, _, _, new_text = self._upgrade(text)119 self.assertEqual(text, new_text)120 text = "from tensorflow.google.compat import v2 as tf"121 _, _, _, new_text = self._upgrade(text)122 self.assertEqual(text, new_text)123 def testTensorFlowDontChangeContrib(self):124 text = "import tensorflow.contrib as foo"125 _, _, _, new_text = self._upgrade(text)126 self.assertEqual(text, new_text)127 text = "from tensorflow import contrib"128 _, _, _, new_text = self._upgrade(text)129 self.assertEqual(text, new_text)130 def test_contrib_to_addons_move(self):131 small_mapping = {132 "tf.contrib.layers.poincare_normalize":133 "tfa.layers.PoincareNormalize",134 "tf.contrib.layers.maxout":135 "tfa.layers.Maxout",136 "tf.contrib.layers.group_norm":137 "tfa.layers.GroupNormalization",138 "tf.contrib.layers.instance_norm":139 "tfa.layers.InstanceNormalization",140 }141 for symbol, replacement in small_mapping.items():142 text = "{}('stuff', *args, **kwargs)".format(symbol)143 _, report, _, _ = self._upgrade(text)144 self.assertIn(replacement, report)145if __name__ == "__main__":146 test_lib.main()147 def testTensorFlowDontChangeContrib(self):148 text = "import tensorflow.contrib as foo"149 _, _, _, new_text = self._upgrade(text)150 self.assertEqual(text, new_text)151 text = "from tensorflow import contrib"152 _, _, _, new_text = self._upgrade(text)153 self.assertEqual(text, new_text)154 def test_contrib_to_addons_move(self):155 small_mapping = {156 "tf.contrib.layers.poincare_normalize":157 "tfa.layers.PoincareNormalize",158 "tf.contrib.layers.maxout":159 "tfa.layers.Maxout",160 "tf.contrib.layers.group_norm":161 "tfa.layers.GroupNormalization",162 "tf.contrib.layers.instance_norm":163 "tfa.layers.InstanceNormalization",164 }165 for symbol, replacement in small_mapping.items():166 text = "{}('stuff', *args, **kwargs)".format(symbol)167 _, report, _, _ = self._upgrade(text)168 
self.assertIn(replacement, report)169if __name__ == "__main__":...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. It covers everything from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run tempest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful