How to use the scale method in ATX

Best Python code snippets using ATX

affine.py

Source: affine.py (GitHub)

...
          array_ops.zeros([], identity_multiplier.dtype),
          ["identity_multiplier should be non-zero."])],
          identity_multiplier)
      return identity_multiplier

    scale = distribution_util.make_tril_scale(
        loc=shift,
        scale_tril=tril,
        scale_diag=diag,
        scale_identity_multiplier=identity_multiplier,
        validate_args=validate_args,
        assert_positive=False,
        shape_hint=shape_hint)
    if perturb_factor is not None:
      return linalg.LinearOperatorLowRankUpdate(
          scale,
          u=perturb_factor,
          diag_update=perturb_diag,
          is_diag_update_positive=perturb_diag is None,
          is_non_singular=True,  # Implied by is_positive_definite=True.
          is_self_adjoint=True,
          is_positive_definite=True,
          is_square=True)
    return scale

  @property
  def shift(self):
    """The `shift` `Tensor` in `Y = scale @ X + shift`."""
    return self._shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
    return self._scale

  def _forward(self, x):
    y = x
    if self._is_only_identity_multiplier:
      y *= self._scale
      if self.shift is not None:
        return y + self.shift
      return y
    y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
        y, expand_batch_dim=False)
    with ops.control_dependencies(self._maybe_check_scale() if
                                  self.validate_args else []):
      y = self.scale.matmul(y)
    y = self._shaper.undo_make_batch_of_event_sample_matrices(
        y, sample_shape, expand_batch_dim=False)
    if self.shift is not None:
      y += self.shift
    return y

  def _inverse(self, y):
    x = y
    if self.shift is not None:
      x -= self.shift
    if self._is_only_identity_multiplier:
      return x / self._scale
    x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
        x, expand_batch_dim=False)
    # Solve fails if the op is singular so we may safely skip this assertion.
    x = self.scale.solve(x)
    x = self._shaper.undo_make_batch_of_event_sample_matrices(
        x, sample_shape, expand_batch_dim=False)
    return x

  def _forward_log_det_jacobian(self, x):
    # is_constant_jacobian = True for this bijector, hence the
    # `log_det_jacobian` need only be specified for a single input, as this
    # will be tiled to match `event_ndims`.
    if self._is_only_identity_multiplier:
      # We don't pad in this case and instead let the fldj be applied
      # via broadcast.
      event_size = array_ops.shape(x)[-1]
      event_size = math_ops.cast(event_size, dtype=self._scale.dtype)
      return math_ops.log(math_ops.abs(self._scale)) * event_size
    return self.scale.log_abs_determinant()

  def _maybe_check_scale(self):
    try:
      return [self.scale.assert_non_singular()]
    except NotImplementedError:
      pass
...
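The snippet above is the part of TensorFlow's Affine bijector that builds `scale` as a `LinearOperator` and applies it in the transform `Y = scale @ X + shift` (`_forward` multiplies by `scale` and adds `shift`; `_inverse` undoes it with `scale.solve`). Below is a minimal, hedged sketch of how that `scale` property is typically consumed, assuming the TF 1.x `tf.contrib.distributions.bijectors.Affine` API that this file belongs to; treat the exact import path and constructor arguments as assumptions.

# Hedged sketch: exercising the scale/shift machinery shown above via the
# TF 1.x contrib Affine bijector (API assumed from the snippet's codebase).
import tensorflow as tf

bijectors = tf.contrib.distributions.bijectors

# Y = scale @ X + shift, here with a diagonal scale matrix diag(2.0, 0.5).
affine = bijectors.Affine(shift=[1.0, -1.0], scale_diag=[2.0, 0.5])

x = tf.constant([[1.0, 1.0]])
y = affine.forward(x)        # scale is applied first, then the shift is added
x_back = affine.inverse(y)   # inverts the transform (scale.solve under the hood)

with tf.Session() as sess:
  print(sess.run(y))                        # [[3.0, -0.5]]
  print(sess.run(x_back))                   # [[1.0, 1.0]]
  print(sess.run(affine.scale.to_dense()))  # dense form of the scale LinearOperator

With a diagonal `scale_diag`, `affine.scale` is a diagonal `LinearOperator`, matching the `scale` property shown in the source above.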

laplace_test.py

Source: laplace_test.py (GitHub)

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow as tf


class LaplaceTest(tf.test.TestCase):

  def testLaplaceShape(self):
    with self.test_session():
      loc = tf.constant([3.0] * 5)
      scale = tf.constant(11.0)
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      self.assertEqual(laplace.batch_shape().eval(), (5,))
      self.assertEqual(laplace.get_batch_shape(), tf.TensorShape([5]))
      self.assertAllEqual(laplace.event_shape().eval(), [])
      self.assertEqual(laplace.get_event_shape(), tf.TensorShape([]))

  def testLaplaceLogPDF(self):
    with self.test_session():
      batch_size = 6
      loc = tf.constant([2.0] * batch_size)
      scale = tf.constant([3.0] * batch_size)
      loc_v = 2.0
      scale_v = 3.0
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
      log_pdf = laplace.log_pdf(x)
      self.assertEqual(log_pdf.get_shape(), (6,))
      self.assertAllClose(log_pdf.eval(), expected_log_pdf)
      pdf = laplace.pdf(x)
      self.assertEqual(pdf.get_shape(), (6,))
      self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))

  def testLaplaceLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      loc = tf.constant([[2.0, 4.0]] * batch_size)
      scale = tf.constant([[3.0, 4.0]] * batch_size)
      loc_v = np.array([2.0, 4.0])
      scale_v = np.array([3.0, 4.0])
      x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
      log_pdf = laplace.log_pdf(x)
      log_pdf_values = log_pdf.eval()
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      self.assertAllClose(log_pdf_values, expected_log_pdf)
      pdf = laplace.pdf(x)
      pdf_values = pdf.eval()
      self.assertEqual(pdf.get_shape(), (6, 2))
      self.assertAllClose(pdf_values, np.exp(expected_log_pdf))

  def testLaplaceLogPDFMultidimensionalBroadcasting(self):
    with self.test_session():
      batch_size = 6
      loc = tf.constant([[2.0, 4.0]] * batch_size)
      scale = tf.constant(3.0)
      loc_v = np.array([2.0, 4.0])
      scale_v = 3.0
      x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
      log_pdf = laplace.log_pdf(x)
      log_pdf_values = log_pdf.eval()
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      self.assertAllClose(log_pdf_values, expected_log_pdf)
      pdf = laplace.pdf(x)
      pdf_values = pdf.eval()
      self.assertEqual(pdf.get_shape(), (6, 2))
      self.assertAllClose(pdf_values, np.exp(expected_log_pdf))

  def testLaplaceCDF(self):
    with self.test_session():
      batch_size = 6
      loc = tf.constant([2.0] * batch_size)
      scale = tf.constant([3.0] * batch_size)
      loc_v = 2.0
      scale_v = 3.0
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      expected_cdf = stats.laplace.cdf(x, loc_v, scale=scale_v)
      cdf = laplace.cdf(x)
      self.assertEqual(cdf.get_shape(), (6,))
      self.assertAllClose(cdf.eval(), expected_cdf)

  def testLaplaceMean(self):
    with self.test_session():
      loc_v = np.array([1.0, 3.0, 2.5])
      scale_v = np.array([1.0, 4.0, 5.0])
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      expected_means = stats.laplace.mean(loc_v, scale=scale_v)
      self.assertEqual(laplace.mean().get_shape(), (3,))
      self.assertAllClose(laplace.mean().eval(), expected_means)

  def testLaplaceMode(self):
    with self.test_session():
      loc_v = np.array([0.5, 3.0, 2.5])
      scale_v = np.array([1.0, 4.0, 5.0])
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      self.assertEqual(laplace.mode().get_shape(), (3,))
      self.assertAllClose(laplace.mode().eval(), loc_v)

  def testLaplaceVariance(self):
    with self.test_session():
      loc_v = np.array([1.0, 3.0, 2.5])
      scale_v = np.array([1.0, 4.0, 5.0])
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      expected_variances = stats.laplace.var(loc_v, scale=scale_v)
      self.assertEqual(laplace.variance().get_shape(), (3,))
      self.assertAllClose(laplace.variance().eval(), expected_variances)

  def testLaplaceStd(self):
    with self.test_session():
      loc_v = np.array([1.0, 3.0, 2.5])
      scale_v = np.array([1.0, 4.0, 5.0])
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      expected_std = stats.laplace.std(loc_v, scale=scale_v)
      self.assertEqual(laplace.std().get_shape(), (3,))
      self.assertAllClose(laplace.std().eval(), expected_std)

  def testLaplaceEntropy(self):
    with self.test_session():
      loc_v = np.array([1.0, 3.0, 2.5])
      scale_v = np.array([1.0, 4.0, 5.0])
      expected_entropy = stats.laplace.entropy(loc_v, scale=scale_v)
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      self.assertEqual(laplace.entropy().get_shape(), (3,))
      self.assertAllClose(laplace.entropy().eval(), expected_entropy)

  def testLaplaceSample(self):
    with tf.Session():
      loc_v = 4.0
      scale_v = 3.0
      loc = tf.constant(loc_v)
      scale = tf.constant(scale_v)
      n = 100000
      laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale)
      samples = laplace.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(samples.get_shape(), (n,))
      self.assertEqual(sample_values.shape, (n,))
      self.assertAllClose(sample_values.mean(),
                          stats.laplace.mean(loc_v, scale=scale_v),
                          rtol=0.05, atol=0.)
      self.assertAllClose(sample_values.var(),
                          stats.laplace.var(loc_v, scale=scale_v),
                          rtol=0.05, atol=0.)
      self.assertTrue(self._kstest(loc_v, scale_v, sample_values))

  def testLaplaceSampleMultiDimensional(self):
    with tf.Session():
      loc_v = np.array([np.arange(1, 101, dtype=np.float32)])  # 1 x 100
      scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T  # 10 x 1
      laplace = tf.contrib.distributions.Laplace(loc=loc_v, scale=scale_v)
      n = 10000
      samples = laplace.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(samples.get_shape(), (n, 10, 100))
      self.assertEqual(sample_values.shape, (n, 10, 100))
      zeros = np.zeros_like(loc_v + scale_v)  # 10 x 100
      loc_bc = loc_v + zeros
      scale_bc = scale_v + zeros
      self.assertAllClose(
          sample_values.mean(axis=0),
          stats.laplace.mean(loc_bc, scale=scale_bc),
          rtol=0.35, atol=0.)
      self.assertAllClose(
          sample_values.var(axis=0),
          stats.laplace.var(loc_bc, scale=scale_bc),
          rtol=0.10, atol=0.)
      fails = 0
      trials = 0
      for ai, a in enumerate(np.reshape(loc_v, [-1])):
        for bi, b in enumerate(np.reshape(scale_v, [-1])):
          s = sample_values[:, bi, ai]
          trials += 1
          fails += 0 if self._kstest(a, b, s) else 1
      self.assertLess(fails, trials * 0.03)

  def _kstest(self, loc, scale, samples):
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    ks, _ = stats.kstest(samples, stats.laplace(loc, scale=scale).cdf)
    # Return True when the test passes.
    return ks < 0.02

  def testLaplacePdfOfSampleMultiDims(self):
    with tf.Session() as sess:
      laplace = tf.contrib.distributions.Laplace(
          loc=[7., 11.], scale=[[5.], [6.]])
      num = 50000
      samples = laplace.sample(num, seed=137)
      pdfs = laplace.pdf(samples)
      sample_vals, pdf_vals = sess.run([samples, pdfs])
      self.assertEqual(samples.get_shape(), (num, 2, 2))
      self.assertEqual(pdfs.get_shape(), (num, 2, 2))
      self.assertAllClose(
          stats.laplace.mean([[7., 11.], [7., 11.]],
                             scale=np.array([[5., 5.], [6., 6.]])),
          sample_vals.mean(axis=0),
          rtol=0.05, atol=0.)
      self.assertAllClose(
          stats.laplace.var([[7., 11.], [7., 11.]],
                            scale=np.array([[5., 5.], [6., 6.]])),
          sample_vals.var(axis=0),
          rtol=0.05, atol=0.)
      self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
      self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
      self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
      self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)

  def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
    s_p = zip(sample_vals, pdf_vals)
    prev = (0, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)

  def testLaplaceNonPositiveInitializationParamsRaises(self):
    with self.test_session():
      loc_v = tf.constant(0.0, name="loc")
      scale_v = tf.constant(-1.0, name="scale")
      laplace = tf.contrib.distributions.Laplace(
          loc=loc_v, scale=scale_v, validate_args=True)
      with self.assertRaisesOpError("scale"):
        laplace.mean().eval()
      loc_v = tf.constant(1.0, name="loc")
      scale_v = tf.constant(0.0, name="scale")
      laplace = tf.contrib.distributions.Laplace(
          loc=loc_v, scale=scale_v, validate_args=True)
      with self.assertRaisesOpError("scale"):
        laplace.mean().eval()

  def testLaplaceWithSoftplusScale(self):
    with self.test_session():
      loc_v = tf.constant([0.0, 1.0], name="loc")
      scale_v = tf.constant([-1.0, 2.0], name="scale")
      laplace = tf.contrib.distributions.LaplaceWithSoftplusScale(
          loc=loc_v, scale=scale_v)
      self.assertAllClose(tf.nn.softplus(scale_v).eval(), laplace.scale.eval())
      self.assertAllClose(loc_v.eval(), laplace.loc.eval())


if __name__ == "__main__":
...
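These tests revolve around the `scale` parameter of `tf.contrib.distributions.Laplace`: it must be strictly positive (enforced when `validate_args=True`), it broadcasts against `loc`, and it follows the same `scale` convention as SciPy. Here is a minimal, hedged sketch of the same usage outside a test harness, mirroring the calls made in the file above (TF 1.x contrib API assumed).

# Hedged sketch: the Laplace `scale` parameter checked against scipy,
# mirroring the tests above (TF 1.x contrib API assumed).
import numpy as np
import tensorflow as tf
from scipy import stats

loc = tf.constant([2.0, 4.0])
scale = tf.constant([3.0, 4.0])   # must be strictly positive
laplace = tf.contrib.distributions.Laplace(loc=loc, scale=scale,
                                           validate_args=True)

x = np.array([2.5, 0.1], dtype=np.float32)
with tf.Session() as sess:
  log_pdf = sess.run(laplace.log_pdf(x))
  expected = stats.laplace.logpdf(x, loc=[2.0, 4.0], scale=[3.0, 4.0])
  np.testing.assert_allclose(log_pdf, expected, rtol=1e-5)
  print(sess.run(laplace.scale))  # [3.0, 4.0]

`np.testing.assert_allclose` plays the role of `self.assertAllClose` from the test class.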

mvn_diag_plus_low_rank.py

Source: mvn_diag_plus_low_rank.py (GitHub)

...
        loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
        scale_perturb_diag]):
      has_low_rank = (scale_perturb_factor is not None or
                      scale_perturb_diag is not None)
      scale = distribution_util.make_diag_scale(
          loc=loc,
          scale_diag=scale_diag,
          scale_identity_multiplier=scale_identity_multiplier,
          validate_args=validate_args,
          assert_positive=has_low_rank)
      scale_perturb_factor = _convert_to_tensor(
          scale_perturb_factor,
          name="scale_perturb_factor")
      scale_perturb_diag = _convert_to_tensor(
          scale_perturb_diag,
          name="scale_perturb_diag")
      if has_low_rank:
        scale = linalg.LinearOperatorLowRankUpdate(
            scale,
...
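In this snippet, `scale` is assembled as a diagonal operator plus an optional low-rank update, i.e. scale = diag(scale_diag) + scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor^T. Below is a hedged sketch of constructing the corresponding distribution, assuming the TF 1.x `tf.contrib.distributions.MultivariateNormalDiagPlusLowRank` class that this file defines; the specific argument values are illustrative.

# Hedged sketch: building the distribution whose `scale` operator the
# snippet above assembles (TF 1.x contrib API assumed).
import tensorflow as tf

ds = tf.contrib.distributions

# scale = diag(scale_diag)
#         + scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor^T
mvn = ds.MultivariateNormalDiagPlusLowRank(
    loc=[1.0, -1.0, 0.5],
    scale_diag=[1.0, 2.0, 3.0],
    scale_perturb_factor=[[1.0], [0.0], [1.0]],  # rank-1 perturbation
    scale_perturb_diag=[0.5])

with tf.Session() as sess:
  print(sess.run(mvn.scale.to_dense()))   # dense form of the LinearOperatorLowRankUpdate
  print(sess.run(mvn.sample(3, seed=42)))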

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run ATX automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing for free!

