How to use the _compare method in autotest

Best Python code snippets using autotest_python
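In the test suites collected below, _compare is a private helper that follows one pattern: run the operation under test, compute the expected result independently (by hand or with NumPy), and assert that the two agree. Here is a minimal, self-contained sketch of that pattern; the square_op function and SquareOpTest class are illustrative names, not taken from the snippets below:

import unittest
import numpy as np

def square_op(x):
    # Stand-in for the operation under test (e.g. a TensorFlow kernel).
    return np.asarray(x, dtype=np.float64) ** 2

class SquareOpTest(unittest.TestCase):
    def _compare(self, x, expected):
        # Run the op, then assert it matches the independently
        # computed expected result.
        np.testing.assert_allclose(square_op(x), expected, rtol=1e-6)

    def testSmallValues(self):
        self._compare([1.0, 2.0, 3.0], [1.0, 4.0, 9.0])

if __name__ == "__main__":
    unittest.main()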

cast_op_test.py

Source: cast_op_test.py (GitHub)

...
    self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))

  # Special values like int32max, int64min, inf, -inf, nan casted to
  # integer values in somewhat unexpected ways. And they behave
  # differently on CPU and GPU.
  def _compare(self, x, dst_dtype, expected, use_gpu=False):
    np.testing.assert_equal(
        self._cast(
            x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))

  def testIntToFloatBoundary(self):
    i4 = np.iinfo(np.int32)
    i8 = np.iinfo(np.int64)
    self._compare(i4.min, np.float32, i4.min, False)
    self._compare(i4.max, np.float32, i4.max, False)
    self._compare(i8.min, np.float32, i8.min, False)
    self._compare(i8.max, np.float32, i8.max, False)
    self._compare(i4.min, np.float64, i4.min, False)
    self._compare(i4.max, np.float64, i4.max, False)
    self._compare(i8.min, np.float64, i8.min, False)
    self._compare(i8.max, np.float64, i8.max, False)

  # NOTE: GPU does not support int32/int64 for casting.
  def testInfNan(self):
    i4 = np.iinfo(np.int32)
    i8 = np.iinfo(np.int64)
    self._compare(np.inf, np.float32, np.inf, False)
    self._compare(np.inf, np.float64, np.inf, False)
    if sys.byteorder == "big":
      self._compare(np.inf, np.int32, i4.max, False)
      self._compare(np.inf, np.int64, i8.max, False)
    else:
      # np.float64("np.inf").astype(np.int32) is negative on x86 but positive on ppc64le
      # Numpy link to relevant discussion - https://github.com/numpy/numpy/issues/9040
      # Tensorflow link to relevant discussion - https://github.com/tensorflow/tensorflow/issues/9360
      if platform.machine() == "ppc64le":
        self._compare(-np.inf, np.int32, i4.min, False)
        self._compare(-np.inf, np.int64, i8.min, False)
      else:
        self._compare(np.inf, np.int32, i4.min, False)
        self._compare(np.inf, np.int64, i8.min, False)
    self._compare(-np.inf, np.float32, -np.inf, False)
    self._compare(-np.inf, np.float64, -np.inf, False)
    self._compare(-np.inf, np.int32, i4.min, False)
    self._compare(-np.inf, np.int64, i8.min, False)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
    self._compare(np.nan, np.int32, i4.min, False)
    self._compare(np.nan, np.int64, i8.min, False)
    self._compare(np.inf, np.float32, np.inf, True)
    self._compare(np.inf, np.float64, np.inf, True)
    self._compare(-np.inf, np.float32, -np.inf, True)
    self._compare(-np.inf, np.float64, -np.inf, True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
    self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)

  def _OpError(self, x, dtype, err):
    with self.test_session():
      with self.assertRaisesOpError(err):
        math_ops.cast(x, dtype).eval()

  def testNotImplemented(self):
    self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")

  def testCastToTypeOfVariable(self):
    with self.test_session() as sess:
      x = variables.Variable(5, dtype=dtypes.float32)
      y = variables.Variable(True, dtype=dtypes.bool)
      cast = math_ops.cast(y, x.dtype)
      variables.global_variables_initializer().run()
...
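The platform-dependent float-to-int behaviour that testInfNan guards against can be observed with NumPy alone. A small sketch, pure NumPy with no TensorFlow required; the printed values are typical for x86 but, as the comments in the test note, differ on ppc64le and big-endian machines:

import numpy as np

i4 = np.iinfo(np.int32)

# Casting non-finite floats to an integer type is undefined behaviour
# in C, so NumPy inherits whatever the platform does. On x86 the
# result is typically INT32_MIN for inf, -inf and nan alike.
print(np.float64(np.inf).astype(np.int32))   # typically -2147483648 on x86
print(np.float64(np.nan).astype(np.int32))   # typically -2147483648 on x86
print(i4.min, i4.max)                        # -2147483648 2147483647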

batch_matmul_op_test.py

Source: batch_matmul_op_test.py (GitHub)


...
    z1 = np.array([(2.+2.j), (-2.+2.j), (2.-2.j), (2.+2.j)]).reshape([1, 2, 2])
    self.assertTrue(np.array_equal(z0, z1))

  # Compares _tfpBatchMatmul(x, y, alpha, adj) and _npBatchMatMul(x, y, alpha,
  # adj)
  def _compare(self, x, y, adj_x, adj_y, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      z0 = tf.batch_matmul(x, y, adj_x=adj_x, adj_y=adj_y)
      z0_val = z0.eval()
      z1 = self._npBatchMatmul(x, y, adj_x, adj_y)
      self.assertShapeEqual(z1, z0)
      if z0_val.size != 0:
        err = (np.abs(z0_val - z1) / np.maximum(1, np.abs(z0_val))).max()
        tf.logging.info("error = %f", err)
        self.assertTrue(err < 1e-4)

  # Returns a random float np of "shape".
  def _randFloat(self, shape):
    vals = np.random.normal(0, 1, np.prod(shape)).reshape(shape)
    return np.array(vals, dtype=np.float32)

  def testSimpleFloat(self):
    for use_gpu in [False, True]:
      self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 3, 5]),
                    False, False, use_gpu)
      self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 5, 3]),
                    False, True, use_gpu)
      self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 3, 5]),
                    True, False, use_gpu)
      self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 5, 3]),
                    True, True, use_gpu)

  def testLargeFloat(self):
    for use_gpu in [False, True]:
      self._compare(self._randFloat([10, 64, 75]),
                    self._randFloat([10, 75, 30]), False, False, use_gpu)
      self._compare(self._randFloat([10, 75, 64]),
                    self._randFloat([10, 75, 30]), True, False, use_gpu)
      self._compare(self._randFloat([10, 64, 75]),
                    self._randFloat([10, 30, 75]), False, True, use_gpu)
      self._compare(self._randFloat([10, 75, 64]),
                    self._randFloat([10, 30, 75]), True, True, use_gpu)

  def testHighNDims(self):
    for use_gpu in [False, True]:
      self._compare(self._randFloat([5, 7, 2, 3]),
                    self._randFloat([5, 7, 3, 5]), False, False, use_gpu)
      self._compare(self._randFloat([5, 7, 3, 2]),
                    self._randFloat([5, 7, 3, 5]), True, False, use_gpu)
      self._compare(self._randFloat([5, 7, 2, 3]),
                    self._randFloat([5, 7, 5, 3]), False, True, use_gpu)
      self._compare(self._randFloat([5, 7, 3, 2]),
                    self._randFloat([5, 7, 5, 3]), True, True, use_gpu)

  # Returns a random complex numpy array of "shape".
  def _randComplex(self, shape):
    real = np.random.normal(0, 1, np.prod(shape))
    imag = np.random.normal(0, 1, np.prod(shape))
    vals = [np.complex(v[0], v[1]) for v in zip(real, imag)]
    return np.array(vals, dtype=np.complex64).reshape(shape)

  def testSimpleComplex(self):
    self._compare(self._randComplex([7, 2, 3]),
                  self._randComplex([7, 3, 5]), False, False)
    self._compare(self._randComplex([7, 2, 3]),
                  self._randComplex([7, 5, 3]), False, True)
    self._compare(self._randComplex([7, 3, 2]),
                  self._randComplex([7, 3, 5]), True, False)
    self._compare(self._randComplex([7, 3, 2]),
                  self._randComplex([7, 5, 3]), True, True)

  def testLargeComplex(self):
    self._compare(self._randComplex([10, 64, 75]),
                  self._randComplex([10, 75, 30]), False, False)
    self._compare(self._randComplex([10, 64, 75]),
                  self._randComplex([10, 30, 75]), False, True)
    self._compare(self._randComplex([10, 75, 64]),
                  self._randComplex([10, 75, 30]), True, False)
    self._compare(self._randComplex([10, 75, 64]),
                  self._randComplex([10, 30, 75]), True, True)

  def testEmpty(self):
    self._compare(np.zeros([0, 3, 2]).astype(np.float32),
                  np.zeros([0, 2, 4]).astype(np.float32), False, False)
    self._compare(np.zeros([3, 2, 0]).astype(np.float32),
                  np.zeros([3, 0, 5]).astype(np.float32), False, False)
    self._compare(np.zeros([3, 0, 2]).astype(np.float32),
                  np.zeros([3, 2, 5]).astype(np.float32), False, False)
    self._compare(np.zeros([3, 3, 2]).astype(np.float32),
                  np.zeros([3, 2, 0]).astype(np.float32), False, False)

class BatchMatmulGradientTest(tf.test.TestCase):

  # loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
  # gradient checker.
  def _checkGrad(self, x, y, adj_x, adj_y):
    assert 3 == x.ndim
    assert 3 == y.ndim
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      z = tf.batch_matmul(inx, iny, adj_x, adj_y)
      loss = tf.reduce_sum(z)
      epsilon = 1e-2
      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
           [inx, iny],
           [x.shape, y.shape],
           loss,
           [1],
           x_init_value=[x, y],
           delta=epsilon)
      tf.logging.info("x_jacob_t = %s", x_jacob_t.reshape(x.shape))
      tf.logging.info("x_jacob_n = %s", x_jacob_n.reshape(x.shape))
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
      tf.logging.info("y_jacob_t = %s", y_jacob_t.reshape(y.shape))
      tf.logging.info("y_jacob_n = %s", y_jacob_n.reshape(y.shape))
      self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests a batched matmul of x, and y: x is a 3D tensor of shape [b,
  # n, k] y is a 3D tensor of shape [b, k, m] the batched matmul
  # computes z of shape [b, n, m], where z[i, :, :] = x[i, :, :]
  # matmul y[i, :, :]
  def _compare(self, b, n, k, m):
    x = np.random.normal(0, 1, b * n * k).astype(np.float32).reshape([b, n, k])
    y = np.random.normal(0, 1, b * k * m).astype(np.float32).reshape([b, k, m])
    self._checkGrad(x, y, False, False)
    self._checkGrad(x.reshape([b, k, n]), y, True, False)
    self._checkGrad(x, y.reshape([b, m, k]), False, True)
    self._checkGrad(x.reshape([b, k, n]), y.reshape([b, m, k]), True, True)

  def testSmall(self):
    self._compare(1, 2, 3, 5)

  def testMedium(self):
    self._compare(3, 4, 7, 10)

  # Can't do testLarge using very large inputs because gradient
  # checker will take way too long time.

if __name__ == "__main__":
...
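The float path of _compare above accepts a result when the largest element-wise relative error against the NumPy reference stays below 1e-4, with the denominator clamped to at least 1 so near-zero entries do not inflate the ratio. That tolerance rule can be reproduced today with np.matmul, which batches over leading dimensions much like the old tf.batch_matmul; a sketch with illustrative shapes:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0, 1, (7, 2, 3)).astype(np.float32)  # [b, n, k]
y = rng.normal(0, 1, (7, 3, 5)).astype(np.float32)  # [b, k, m]

z0 = np.matmul(x, y)                   # "result under test", shape [b, n, m]
z1 = np.einsum("bnk,bkm->bnm", x, y)   # independently computed reference

# Same tolerance rule as _compare: max relative error below 1e-4,
# with max(1, |z0|) guarding against division by tiny magnitudes.
err = (np.abs(z0 - z1) / np.maximum(1, np.abs(z0))).max()
assert err < 1e-4, err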


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

