How to use the check_function method in pytractor

Best Python code snippet using pytractor_python

test_top_level1.py

Source: test_top_level1.py (GitHub)


# ... (opening lines elided in the original snippet)
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing.config import ctx_list
from nnvm.testing.check_computation import check_function

def test_check_function():
    # test the testing function
    x = sym.Variable("x")
    y = sym.Variable("y")
    # different styles of returning gradients from the backward function
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [head_grads, 2*head_grads],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: (head_grads, 2*head_grads),
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'x': head_grads, 'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [2*head_grads],
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float64')
    # test just numerical gradients
    # different styles of shape and dtype passing
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)},
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)}, dtype='float32',
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)}, dtype={x: 'float32', 'y': 'float32'},
                   numerical_grads=True)
    check_function(x + 2*y, shape=(1, 2), dtype='float32',
                   numerical_grads=True)
    # specifying variable attributes on variable creation
    # (in this case type codes must be used)
    x = sym.Variable("x", dtype=0, shape=(1, 2))
    check_function(x + 2*y, shape={y: (1, 2)}, dtype={'y': 'float32'}, numerical_grads=True)
    y = sym.Variable("y", dtype=0, shape=(1, 2))
    # shape overriding
    def _fwd1(x, y):
        assert x.shape == (1, 1)
        assert y.shape == (1, 2)
        return x + 2*y
    check_function(x + 2*y, _fwd1, shape={x: (1, 1)})
    # in_range
    def _fwd2(x, y):
        assert x.shape == (100,)
        assert (x <= 0.9).all()
        assert (x >= 0.8).all()
        return x + 2*y
    check_function(x + 2*y, _fwd2, shape=(100,), in_range=(0.8, 0.9), numerical_grads=False)
    check_function(x + 2*y, _fwd2, shape=(100,), in_range={'x': (0.8, 0.9)}, numerical_grads=False)
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   in_range={'head_grads_0': (1.0, 1.0)})
    # explicit passing of values
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   values={'head_grads_0': np.full((1, 2), 1.0)})
    # check that the function reports errors
    def _check_function_must_fail(*args, **kwargs):
        error = AssertionError
        if 'error' in kwargs:
            error = kwargs['error']
            del kwargs['error']
        try:
            check_function(*args, quiet=True, **kwargs)
        except error:
            pass
        else:
            raise AssertionError("check_function didn't raise an exception")
    _check_function_must_fail(x + 2*y, error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: x + y)
    _check_function_must_fail(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0])
    _check_function_must_fail(sym.block_grad(x + 2*y), numerical_grads=True)
    _check_function_must_fail(x*x, numerical_grads=True,
                              numerical_grads_params={'atol': 0.0, 'rtol': 0.0})
    _check_function_must_fail(sym.log(-x*x), numerical_grads=True, error=ValueError)
    # different styles of returning results from the forward function
    check_function(x + 2*y, lambda x, y: [x + 2*y], numerical_grads=False)
    _check_function_must_fail(x + 2*y, lambda x, y: [x + 2*y, x], numerical_grads=False,
                              error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: [], numerical_grads=False,
                              error=ValueError)
    # multiple outputs
    z = sym.Group([2*x + y, x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y])
    check_function(z, lambda x, y: (2*x + y, x + 2*y))
    check_function(z, backward=lambda x, y, head_grads: [2*head_grads[0] + head_grads[1],
                                                         head_grads[0] + 2*head_grads[1]])
    _check_function_must_fail(z, backward=lambda x, y, head_grads: [2*head_grads[0],
                                                                    2*head_grads[1]])
    check_function(z, backward=lambda x, y, head_grads: [head_grads[1], 2*head_grads[1]],
                   in_range={'head_grads_0': (0, 0)})
    check_function(z, numerical_grads=True)
    z = sym.Group([sym.block_grad(2*x + y), x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y], numerical_grads=False)
    _check_function_must_fail(z, lambda x, y: [2*x + y, x + 2*y])
    _check_function_must_fail(z, numerical_grads=True)
    z = sym.Group([2*x + y, sym.block_grad(x + 2*y)])
    _check_function_must_fail(z, numerical_grads=True)
    z = sym.Group([2*x + y, x + 2*y, x, y, sym.sum(x)])
    check_function(z, lambda x, y: [2*x + y, x + 2*y, x, y, np.sum(x)])
    # passing additional parameters to forward and backward
    def _fwd3(x, p):
        assert p == 'v'
        return x + 1
    def _bwd3(x, p, head_grads):
        assert p == 'v'
        return head_grads
    check_function(x + 1, _fwd3, _bwd3, additional_params={'p': 'v'})
    # implicitly created variables and shape/dtype inference for inputs
    x = sym.Variable("x", shape=(2, 3), dtype=0)
    b = sym.Variable("b")
    y = sym.dense(data=x, bias=b, units=4)
    # Don't check gradients on cuda because it doesn't yet support ewise after reduce
    check_function(y, exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, shape={'x': (3, 4)}, exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, dtype={'x': 'float64'}, exclude_targets={'cuda'}, numerical_grads=True)
    x = sym.Variable("x")
    b = sym.Variable("b")
    w = sym.Variable("w")
    y = sym.dense(data=x, bias=b, weight=w, units=4)
    def _fwd_dense(x, w, b):
        return np.dot(x, w.T) + b
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'x': 'float32'}, numerical_grads=False)
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'w': 'float64'}, numerical_grads=False)
    _check_function_must_fail(y, _fwd_dense, shape={'x': (1, 2)},
                              dtype={'w': 'float64', 'b': 'float32'},
                              numerical_grads=False,
                              error=nnvm._base.NNVMError)
    # fails because no shape
    _check_function_must_fail(y, _fwd_dense, numerical_grads=False, error=ValueError)
    # ok because type is float32 by default
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, numerical_grads=False)

def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)
    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x
    def backward(head_grads, x):
        sub = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return [(sub > 0).astype("float") *
                ((x > 0).astype("float") + 0.3 * (x < 0).astype("float")) * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_prelu_nchw():
    x = sym.Variable("x")
    a = sym.Variable("a")
    y = sym.prelu(data=x, alpha=a)
    def forward(x, a):
        return (x < 0) * (x * a.reshape(3, 1, 1)) + (x >= 0) * x
    shape = {'x': (1, 3, 32, 32), 'a': (3,)}
    check_function(y, forward, shape=shape)

def test_prelu_nhwc():
    x = sym.Variable("x")
    a = sym.Variable("a")
    y = sym.prelu(data=x, alpha=a, axis=3)
    def forward(x, a):
        return (x < 0) * (x * a.reshape(1, 1, 3)) + (x >= 0) * x
    shape = {'x': (1, 32, 32, 3), 'a': (3,)}
    check_function(y, forward, shape=shape)

def test_sym_scalar_pow():
    scalar = 3
    x = sym.Variable("x")
    y = x**scalar
    def forward(x):
        return x**scalar
    def backward(head_grads, x):
        return [scalar * x**(scalar - 1) * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_scalar_sym_pow():
    scalar = 3
    x = sym.Variable("x")
    y = scalar**x
    def forward(x):
        return scalar**x
    def backward(head_grads, x):
        return [np.log(scalar) * scalar**x * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)
    def forward(x):
        return np.exp(x)
    def backward(head_grads, x):
        return [np.exp(x) * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_log():
    x = sym.Variable("x")
    y = sym.log(x)
    def forward(x):
        return np.log(x)
    def backward(head_grads, x):
        return [1. / x * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, in_range=(0.002, 2.0), shape=shape)

def test_tanh():
    x = sym.Variable("x")
    y = sym.tanh(x)
    def forward(x):
        return np.sinh(x) / np.cosh(x)
    def backward(head_grads, x):
        y_np = forward(x)
        return [(1 - y_np**2) * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_sigmoid():
    x = sym.Variable("x")
    y = sym.sigmoid(x)
    def forward(x):
        return 1.0 / (1.0 + np.exp(-x))
    def backward(head_grads, x):
        y_np = forward(x)
        return [y_np * (1 - y_np) * head_grads]
    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)

def test_softmax():
    x = sym.Variable("x")
    y = sym.softmax(x)
    def forward(x):
        return topi.testing.softmax_python(x)
    def backward(head_grads, x):
        y = topi.testing.softmax_python(x)
        grad = y * (head_grads - np.sum(y * head_grads, axis=1, keepdims=True))
        return [grad]
    check_function(y, forward, backward,
                   shape={'x': (10, 1000)}, numerical_grads=False)
    check_function(y, forward, backward,
                   shape={'x': (2, 10)})

def test_log_softmax():
    x = sym.Variable("x")
    y = sym.log_softmax(x)
    def forward(x):
        return topi.testing.log_softmax_python(x)
    def backward(head_grads, x):
        y = topi.testing.log_softmax_python(x)
        grad = head_grads - np.exp(y) * np.sum(head_grads, axis=1, keepdims=True)
        return [grad]
    check_function(y, forward, backward,
                   shape={'x': (10, 1000)}, numerical_grads=False)
    check_function(y, forward, backward,
                   shape={'x': (2, 10)})

def test_dense():
    x = sym.Variable("x", shape=(10, 100))
    w = sym.Variable("dense_weight", shape=(3, 100))
    b = sym.Variable("dense_bias", shape=(3,))
    y = sym.dense(x, w, b, use_bias=True, units=3, name="dense")
    y = sym.flatten(y)
    def forward(x, dense_weight, dense_bias):
        return np.dot(x, dense_weight.T) + dense_bias
    shape = {
        'x': (10, 100),
        'w': (3, 100),
        'b': (3,)
    }
    # Don't check gradients on cuda because it doesn't yet support ewise after reduce
    check_function(y, forward, shape=shape,
                   exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, forward, shape=shape,
                   only_targets={'cuda'}, numerical_grads=False)

def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(
        x, gamma, beta, moving_mean, moving_var, epsilon=eps)
    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta
    shape = {
        'x': (10, 20),
        'gamma': (20,),
        'beta': (20,),
        'moving_mean': (20,),
        'moving_var': (20,)
    }
    check_function(y, forward, in_range=(0.001, 1.0), shape=shape)

def verify_concatenate(ishape, axis):
    x = [sym.Variable("x%d" % i, shape=ishape[i]) for i in range(len(ishape))]
    y = sym.concatenate(*x, axis=axis) + 1
    def forward(**kwargs):
        return np.concatenate(list(kwargs.values()), axis=axis) + 1
    check_function(y, forward)

def test_concatenate():
    verify_concatenate([(2, 3, 4), (1, 3, 4)], axis=0)
    verify_concatenate([(2, 4), (2, 7)], axis=1)

def verify_split(ishape, indices_or_sections, axis):
    x = sym.Variable("x", shape=ishape)
    y = sym.split(x, indices_or_sections=indices_or_sections, axis=axis)
    def forward(x):
        return np.split(x, indices_or_sections, axis=axis)
    check_function(y, forward)

def test_split():
    verify_split((2, 3), 2, axis=0)
    verify_split((5, 3), [3], axis=0)
    verify_split((5, 9, 3), [3, 4], axis=1)

def verify_strided_slice(ishape, begin, end, strideinp=None):
    stride = strideinp if strideinp else [1, 1, 1]
    x = sym.Variable("x", shape=ishape)
    if strideinp:
        y = sym.strided_slice(x, begin=begin, end=end, stride=stride) + 1
    else:
        y = sym.strided_slice(x, begin=begin, end=end) + 1
    for i in range(len(begin), 3):
        begin.append(0)
    for i in range(len(end), 3):
        end.append(ishape[i])
    def test_forward(x):
        return x[begin[0]:end[0]:stride[0],
                 begin[1]:end[1]:stride[1], begin[2]:end[2]:stride[2]] + 1
    check_function(y, test_forward)

def test_strided_slice():
    verify_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
    verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
    verify_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
    verify_strided_slice((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 1000, 3])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4])
    verify_strided_slice((3, 4, 3), [1, 1], [4, 4, 3])

def verify_take(src_shape, indices_src, axis=None):
    src_dtype = "float32"
    indices_dtype = "int32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a", shape=src_shape)
    indices = sym.Variable("indices", shape=indices_src.shape)
    y = sym.take(a, indices, axis=axis)
    def forward(a, indices):
        return np.take(a, indices=indices, axis=axis)
    a_src = np.arange(np.prod(src_shape), dtype=src_dtype).reshape(src_shape)
    check_function(y, forward,
                   dtype={'a': src_dtype, 'indices': indices_dtype},
                   values={'a': a_src, 'indices': indices_src})

def test_take():
    verify_take((4,), [1])
    verify_take((4,), [[0, 1, 2, 3]])
    verify_take((3, 3, 3), [[11, 25]])
    verify_take((4,), [[0, 1], [2, 3]])
    verify_take((4,), [1], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 1)
    verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)

def verify_squeeze(shape, axis):
    x = sym.Variable("x")
    if axis is not None:
        y = sym.squeeze(x, axis=axis)
    else:
        y = sym.squeeze(x)
    y = y + 1
    def forward(x):
        return np.squeeze(x, axis=axis) + 1
    def backward(head_grads, x):
        return [np.reshape(head_grads, x.shape)]
    check_function(y, forward, backward, shape=shape)

def test_squeeze():
    verify_squeeze((1, 3, 2, 5), None)
    verify_squeeze((1, 3, 1), axis=0)
    verify_squeeze((1, 3, 2, 5, 1), axis=-1)

def test_pad():
    x = sym.Variable("x")
    y = sym.pad(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), pad_value=1.)
    def forward(x):
        return np.pad(x,
                      pad_width=((0, 0), (0, 0), (0, 1), (2, 3)),
                      mode='constant', constant_values=1.)
    shape = {'x': (1, 3, 28, 28)}
    check_function(y, forward, shape=shape)

def verify_lrn(ishape, size, axis, bias, alpha, beta):
    x = sym.Variable("x", shape=ishape)
    y = sym.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    def forward1(x):
        return topi.testing.lrn_python(x, size, axis, bias, alpha, beta)
    check_function(y, forward1)
    def forward2(x):
        y = forward1(x)
        return (y > 0) * y
    # Checking LRN op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})

def verify_l2_normalize(ishape, eps, axis):
    x = sym.Variable("x", shape=ishape)
    y = sym.l2_normalize(x, eps=eps, axis=axis)
    def forward1(x):
        return topi.testing.l2_normalize_python(x, eps, axis)
    check_function(y, forward1)
    def forward2(x):
        y = forward1(x)
        return (y > 0) * y
    # Checking L2 normalization op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})

def test_lrn():
    verify_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
    verify_lrn((1, 3, 20, 20), 3, 1, 2.0, 1.0, 0.75)

def test_l2_normalize():
    verify_l2_normalize((1, 3, 20, 20), 0.001, (1,))
    verify_l2_normalize((1, 3, 20, 20), 0.001, (1, 2))

if __name__ == "__main__":
    test_check_function()
    test_split()
    test_concatenate()
    test_log_softmax()
    test_batchnorm()
    test_dense()
    test_relu()
    test_prelu_nchw()
    test_prelu_nhwc()
    test_sym_scalar_pow()
    test_scalar_sym_pow()
    test_exp()
    test_log()
    test_tanh()
    test_sigmoid()
    # ... (snippet truncated here in the original)
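Stripped to its essentials, the pattern this test file repeats is: declare nnvm symbols, provide a NumPy reference for the forward pass (and optionally the backward pass), and let check_function compile the graph for the enabled targets, run it on generated inputs, and compare the results, cross-checking analytic gradients against numerical ones. A minimal self-contained sketch, mirroring test_exp from the snippet above:

import numpy as np
import nnvm.symbol as sym
from nnvm.testing.check_computation import check_function

# Symbolic expression under test.
x = sym.Variable("x")
y = sym.exp(x)

def forward(x):
    # NumPy reference for the forward computation.
    return np.exp(x)

def backward(head_grads, x):
    # One gradient per input: d/dx exp(x) = exp(x),
    # scaled by the head gradients flowing in from above.
    return [np.exp(x) * head_grads]

# Generates random inputs of the given shape, runs the compiled graph,
# and compares outputs and gradients against the references.
check_function(y, forward, backward, shape={'x': (1, 3, 32, 32)})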


HW6.py

Source: HW6.py (GitHub)


from Functions import *

H = [[1, 1, 1, 0, 0, 0, 0, 0],
     [0, 0, 0, 1, 1, 1, 0, 0],
     [1, 0, 0, 1, 0, 0, 1, 0],
     [0, 1, 0, 0, 1, 0, 0, 1]]

N = int(len(H[0]))
K = int(len(H))
M = N - K
delta = 0.5  # actually is delta**2
c = [0.2, 0.2, -0.9, 0.6, 0.5, -1.1, -0.4, -1.2]

check_function = [[] for _ in range(K)]
bit_function = [[] for _ in range(N)]
for i in range(K):
    check_function[i] = [i for i, x in enumerate(H[i]) if x == 1]
    for j in range(N):
        if H[i][j] == 1:
            bit_function[j].append(i)
print('check node connections: ', check_function)
print('bit node connections: ', bit_function, '\n')

L_Xi = []
for item in c:
    L_Xi.append(2 * item / delta)
# print(L_Xi)

L_qij = [[] for _ in range(K)]
for j in range(K):
    for k in range(len(check_function[j])):
        L_qij[j].append(L_Xi[check_function[j][k]])
        # L_qij[j].append(LLR_qij(q(1, c[check_function[j][k]], delta), q(-1, c[check_function[j][k]], delta)))
# print(L_qij, len(L_qij))

iter_num = 8
for m in range(iter_num):
    # print(m, L_qij)
    rji = [[] for _ in range(N)]
    for p in range(len(L_qij)):
        for i in range(len(L_qij[p])):
            compute_data = L_qij[p].copy()
            compute_data.remove(L_qij[p][i])
            rji[check_function[p][i]].append(LLR_rji(compute_data))
    # print(m, rji)
    L_qij_update = [[] for _ in range(K)]
    for b in range(len(rji)):
        for i in range(len(rji[b])):
            data = rji[b].copy()
            data.remove(rji[b][i])
            l_qij = L_Xi[b] + sum(data)
            L_qij_update[bit_function[b][rji[b].index(rji[b][i])]].append(round(l_qij, 4))
    # print(m, L_qij_update)
    L_Q = []
    for a in range(len(rji)):
        L_Q.append(LLR_q_update((2 * c[a]) / delta, rji[a]))
    print(f'LQ after {m+1} iterations', L_Q)
    L_qij = L_qij_update

    temp_decision = []
    for item in L_Q:
        if item < 0:
            temp_decision.append(1)
        elif item > 0:
            temp_decision.append(0)
    # ... (snippet truncated here in the original)
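Despite its name, check_function in this script is not a testing helper at all: it is a plain list in which entry i holds the indices of the bits participating in parity check i of the LDPC code defined by H, while bit_function holds the reverse map. The message-passing helpers LLR_rji and LLR_q_update come from the Functions module, which is not shown. Under the standard sum-product (belief-propagation) update rules they would look roughly like the sketch below; the names and call signatures are taken from the script above, but the bodies are an assumption:

import math

def LLR_rji(other_bit_llrs):
    # Check-to-bit message (sum-product rule): the LLR a parity check
    # sends to one bit, computed from the LLRs of all *other* bits in
    # the check: r = 2 * atanh(prod(tanh(L / 2))).
    prod = 1.0
    for llr in other_bit_llrs:
        prod *= math.tanh(llr / 2.0)
    # Clamp so atanh never sees exactly +/-1 on saturated messages.
    prod = max(min(prod, 1.0 - 1e-12), -(1.0 - 1e-12))
    return 2.0 * math.atanh(prod)

def LLR_q_update(channel_llr, check_messages):
    # Bit-decision LLR: intrinsic channel LLR (2*c/delta in the script)
    # plus all incoming check-to-bit messages.
    return channel_llr + sum(check_messages)

The hard decision at the end of each iteration then follows the usual LLR sign convention, as in the temp_decision loop: positive L_Q decodes to bit 0, negative L_Q to bit 1.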


test_worker.py

Source: test_worker.py (GitHub)


#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock

from neutron.common import utils
from neutron.tests import base
from neutron import worker as neutron_worker

class PeriodicWorkerTestCase(base.BaseTestCase):
    def test_periodic_worker_lifecycle(self):
        check_function = mock.Mock()
        worker = neutron_worker.PeriodicWorker(
            check_function, interval=1, initial_delay=1)
        self.addCleanup(worker.stop)
        worker.wait()
        self.assertFalse(check_function.called)
        worker.start()
        utils.wait_until_true(
            lambda: check_function.called,
            timeout=5,
            exception=RuntimeError("check_function not called"))
        worker.stop()
        check_function.reset_mock()
        worker.wait()
        self.assertFalse(check_function.called)
        worker.reset()
        utils.wait_until_true(
            lambda: check_function.called,
            timeout=5,
            # ... (snippet truncated here in the original)
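This test drives the full lifecycle of neutron's PeriodicWorker, substituting a mock.Mock() for the periodic callback so each phase can assert exactly when it fired: not before start(), repeatedly after start(), not after stop() and wait(), and again after reset(). For intuition only, a stripped-down worker with the same surface might look like the sketch below; this is a hypothetical simplification, not neutron's actual implementation:

import threading

class ToyPeriodicWorker:
    # Hypothetical stand-in for neutron_worker.PeriodicWorker: calls
    # check_function every `interval` seconds after an initial delay,
    # until stopped.

    def __init__(self, check_function, interval, initial_delay=0):
        self._check = check_function
        self._interval = interval
        self._initial_delay = initial_delay
        self._stop = threading.Event()
        self._thread = None

    def start(self):
        self._stop.clear()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        # Event.wait returns True if stop() was called during the delay.
        if self._stop.wait(self._initial_delay):
            return
        while not self._stop.is_set():
            self._check()
            self._stop.wait(self._interval)

    def stop(self):
        self._stop.set()

    def wait(self):
        # Block until the worker thread (if any) has exited.
        if self._thread is not None:
            self._thread.join()

    def reset(self):
        # Resume periodic execution after a stop().
        self.start()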


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytractor automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

