How to use the run_and_check method in avocado

Best Python code snippets using avocado_python

test_hybrid_script.py

Source: test_hybrid_script.py (GitHub)


...
from tvm.contrib import util
from tvm.hybrid import script
from tvm.hybrid.runtime import HYBRID_GLOBALS

@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
    def tvm_val_2_py_val(val):
        val = tvm.ir_pass.Substitute(val, var_dict)
        val = tvm.ir_pass.Simplify(val)
        assert isinstance(val, (tvm.expr.IntImm,))
        return val.value

    ctx = tvm.context(target, 0)
    op = None

    if sch is None:
        outs = func(*tuple(tvm.convert(i) if isinstance(i, list) else i for i in args))
        op = outs[0].op if isinstance(outs, list) else outs.op
        sch = tvm.create_schedule(op)
    else:
        assert outs is not None
        assert isinstance(outs, list)
        op = outs[0].op

    emu_args = []
    nd_args = []
    for i in args:
        if isinstance(i, tvm.tensor.Tensor):
            shape = [tvm_val_2_py_val(j) for j in i.shape]
            emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
            nd_args.append(tvm.nd.array(emu_args[-1], ctx))
        elif isinstance(i, tvm.expr.Var):
            emu_args.append(tvm_val_2_py_val(i))
            nd_args.append(emu_args[-1])
        else:
            assert isinstance(i, list)
            emu_args.append(numpy.array(i))

    compile_args = [i for i in args if isinstance(i, (tvm.tensor.Tensor, tvm.expr.Var))] + \
                   (outs if isinstance(outs, list) else [outs])
    module = tvm.build(sch,
                       compile_args,
                       target=target)
    assert module

    out_tensors = []
    for i in range(op.num_outputs):
        output = op.output(i)
        shape = [tvm_val_2_py_val(j) for j in output.shape]
        nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), ctx))
        out_tensors.append(nd_args[-1])

    ref_data = func(*emu_args)
    if isinstance(ref_data, numpy.ndarray):
        ref_data = [ref_data]

    module(*nd_args)

    for nd, np in zip(out_tensors, ref_data):
        tvm.testing.assert_allclose(nd.asnumpy(), np, rtol=1e-5, atol=1e-5)

    module_args = [i for i in args if isinstance(i, (tvm.tensor.Tensor, tvm.expr.Var))]
    module_outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    h_module = tvm.hybrid.build(sch, module_args, module_outs)
    return h_module, module_args, module_outs

@script
def outer_product(n, m, a, b):
    """This is a simple outer product.
    Actually this function is not required to be documented.
    I write this docstring to test skipping docstring functionality.
    """
    c = output_tensor((n, m), a.dtype)
    for i in range(n):
        for j in range(m):
            assert i < n and j < m, "index out of range!"
            c[i, j] = a[i] * b[j]
    return c

#Test global function
#Test bridge between frontend and backend
def test_outer_product():
    n = tvm.size_var('n')
    m = tvm.size_var('m')
    a = tvm.placeholder((n, ), name='a')
    b = tvm.placeholder((m, ), name='b')
    try:
        c = outer_product(n, m, a, b)
        ir = c.op.body
    except IOError as err:
        assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
        return
    #Check for i in (0, n)
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'i'
    assert ir.min.value == 0
    assert ir.extent.name == 'n'
    ibody = ir.body
    assert isinstance(ibody, tvm.stmt.For)
    #Check for j in (0, m)
    assert ibody.loop_var.name == 'j'
    assert ibody.min.value == 0
    assert ibody.extent.name == 'm'
    #Check loop body
    jblock = ibody.body
    assert isinstance(jblock, tvm.stmt.SeqStmt)
    jbody = jblock[0]
    assert isinstance(jbody, tvm.stmt.AssertStmt)
    assert isinstance(jbody.message, tvm.expr.StringImm)
    assert jbody.message.value == "index out of range!"
    jbody = jblock[1]
    assert isinstance(jbody, tvm.stmt.Provide)
    assert jbody.func.name == 'c'
    assert len(jbody.args) == 2
    assert jbody.args[0].name == 'i'
    assert jbody.args[1].name == 'j'
    assert isinstance(jbody.value, tvm.expr.Mul)
    mul = jbody.value
    assert isinstance(mul.a, tvm.expr.Call)
    assert mul.a.name == 'a'
    assert mul.b.name == 'b'

    func, ins, outs = run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
    temp = util.tempdir()
    path = temp.relpath('%s.py' % func.name)
    func.save(path)
    func_ = tvm.hybrid.HybridModule()
    func_.load(path)
    run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)

    for key, _ in HYBRID_GLOBALS.items():
        assert key not in globals().keys()
        assert key not in outer_product.__globals__.keys()

#Test local function
#Test allocation of local variable
def test_fanout():
    @script
    def fanout(n, a):
        three = 3.0
        b = output_tensor((a.shape[0] - 3, ), a.dtype)
        for i in range(a.shape[0] - 3):
            sigma = 0.0
            for j in range(3):
                sigma += a[i + j]
            sigma = sigma / three
            b[i] = sigma
        return b

    n = tvm.size_var('n')
    a = tvm.placeholder((n, ), 'float32', name='a')
    try:
        b = fanout(n, a)
        ir = b.op.body
    except IOError as err:
        assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
        return
    #Check for i in (0, n-3)
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'i'
    assert ir.min.value == 0
    assert tvm.ir_pass.Equal(ir.extent, n - 3)
    #Check loopbody
    ibody = ir.body
    assert isinstance(ibody, tvm.stmt.AttrStmt)
    abody = ibody.body
    assert isinstance(abody, tvm.stmt.Realize)
    assert abody.bounds[0].min.value == 0
    assert abody.bounds[0].extent.value == 1
    assert abody.func.name == 'sigma'
    #Check i loop body
    rbody = abody.body
    assert isinstance(rbody[0], tvm.stmt.Provide)
    assert rbody[0].func.name == 'sigma'
    assert len(rbody[0].args) == 1
    assert rbody[0].args[0].value == 0
    #Check fanout loop
    jloop = rbody[1]
    assert jloop.loop_var.name == 'j'
    assert jloop.min.value == 0
    assert jloop.extent.value == 3
    jbody = jloop.body
    assert isinstance(jbody, tvm.stmt.Provide)
    assert len(jbody.args) == 1
    assert jbody.args[0].value == 0
    assert jbody.func.name == 'sigma'
    assert isinstance(jbody.value, tvm.expr.Add)
    value = jbody.value
    assert isinstance(value.a, tvm.expr.Call)
    assert value.a.name == 'sigma'
    assert len(value.a.args) == 1
    assert value.a.args[0].value == 0
    assert value.b.name == 'a'
    assert len(value.b.args) == 1
    assert tvm.ir_pass.Equal(value.b.args[0], ir.loop_var + jloop.loop_var)
    divide = rbody[2]
    assert isinstance(divide, tvm.stmt.Provide)
    assert len(divide.args) == 1
    assert divide.args[0].value == 0
    value = divide.value
    assert isinstance(value, tvm.expr.Mul)
    assert value.a.name == 'sigma'
    assert len(value.a.args) == 1
    assert value.a.args[0].value == 0
    assert abs(value.b.value - (1 / 3.0)) < 1e-5
    write = rbody[3]
    assert isinstance(write, tvm.stmt.Provide)
    assert write.func.name == 'b'
    assert write.value.name == 'sigma'
    assert len(write.value.args) == 1
    assert write.value.args[0].value == 0

    func, ins, outs = run_and_check(fanout, [n, a], {n: 10})
    run_and_check(func, ins, {n: 10}, outs=outs)

def test_looptype():
    @script
    def looptype(a, b, c):
        d = output_tensor((16, ), 'int32')
        e = output_tensor((16, ), 'int32')
        f = output_tensor((16, ), 'int32')
        for i in parallel(16):
            d[i] = a[i]
        for j in vectorize(16):
            e[j] = b[j]
        for k in unroll(16):
            f[k] = c[k]
        return d, e, f

    a = tvm.placeholder((16, ), name='a', dtype='int32')
    b = tvm.placeholder((16, ), name='b', dtype='int32')
    c = tvm.placeholder((16, ), name='c', dtype='int32')
    try:
        d, e, f = looptype(a, b, c)
        ir = d.op.body
    except:
        return
    iloop = ir[0]
    jloop = ir[1]
    kloop = ir[2]
    assert iloop.for_type == tvm.stmt.For.Parallel
    assert jloop.for_type == tvm.stmt.For.Vectorized
    assert kloop.for_type == tvm.stmt.For.Unrolled

    func, ins, outs = run_and_check(looptype, [a, b, c])
    run_and_check(func, ins, outs=outs)

def test_if():
    @script
    def if_then_else(a):
        b = output_tensor((10, ), 'int32')
        c = output_tensor((10, ), 'int32')
        for i in range(10):
            if i % 2 == 0:
                c[i] = a[i]
            else:
                c[i] = b[i]
        for i in unroll(10):
            b[i] = -1 if i % 2 == 0 else 1
        return b, c

    a = tvm.placeholder((10, ), dtype='int32', name='a')
    func, ins, outs = run_and_check(if_then_else, [a])
    run_and_check(func, ins, outs=outs)

    @script
    def if_triple_condition(a):
        b = output_tensor((10, ), 'int32')
        for i in range(10):
            if 0 <= i < 5:
                b[i] = a[i]
            else:
                b[i] = a[i] + 1
        return b

    func, ins, outs = run_and_check(if_triple_condition, [a])
    run_and_check(func, ins, outs=outs)

    @script
    def if_and(a):
        b = output_tensor((10, ), 'int32')
        for i in range(10):
            if i >= 0 and i < 5:
                b[i] = a[i]
            else:
                b[i] = a[i] + 1
        return b

    func, ins, outs = run_and_check(if_and, [a])
    run_and_check(func, ins, outs=outs)

def test_bind():
    if not tvm.gpu(0).exist:
        print('[Warning] No GPU found! Skip bind test!')
        return

    @script
    def vec_add(a, b):
        c = output_tensor((1000, ), 'float32')
        for tx in bind('threadIdx.x', 1000):
            c[tx] = a[tx] + b[tx]
        return c

    a = tvm.placeholder((1000, ), dtype='float32', name='a')
    b = tvm.placeholder((1000, ), dtype='float32', name='b')
    func, ins, outs = run_and_check(vec_add, [a, b], target='cuda')
    run_and_check(func, ins, outs=outs, target='cuda')

    @script
    def raw(a, b):
        c = output_tensor((1000, ), 'float32')
        for i in range(1000):
            c[i] = a[i] + b[i]
        return c

    c = raw(a, b)
    sch = tvm.create_schedule(c.op)
    x = tvm.thread_axis('threadIdx.x')
    sch[c].bind(c.op.axis[0], x)
    func, ins, outs = run_and_check(raw, [a, b], sch=sch, outs=[c], target='cuda')
    run_and_check(func, ins, outs=outs, target='cuda')

    @tvm.hybrid.script
    def foo(a):
        c = output_tensor((a.shape[0],), a.dtype)
        total = allocate((1,), a.dtype, 'local')
        len_i = a.shape[0]
        len_j = a.shape[1]
        for i in bind('threadIdx.x', len_i):
            total[0] = 0.
            for k in const_range(len_j):
                total[0] += a[i, k]
            c[i] = total[0]
        return c

    a = tvm.placeholder((8, 4), 'float32')
    c = foo(a)
    s = tvm.create_schedule(c.op)
    ir = tvm.lower(s, [a, c], simple_mode=True)
    assert not isinstance(ir, tvm.stmt.AttrStmt)
    func, ins, outs = run_and_check(foo, [a], target='cuda')
    run_and_check(func, ins, outs=outs, target='cuda')

    @tvm.hybrid.script
    def max_threads(a):
        b = output_tensor(a.shape, a.dtype)
        n = a.shape[0]
        m = max_num_threads(True)
        for i in bind('threadIdx.x', m):
            for j in bind('blockIdx.x', ceil_div(n, m)):
                if i * m + j < n:
                    b[i * m + j] = a[i * m + j] + a[i * m + j]
        return b

    a = tvm.placeholder((10000, ), 'float32')
    with tvm.target.create('cuda'):
        func, ins, outs = run_and_check(max_threads, [a], target='cuda')
        run_and_check(func, ins, outs=outs, target='cuda')

def test_math_intrin():
    @script
    def intrin_real(a):
        b = output_tensor((8, ), 'float32')
        b[0] = sqrt(a[0])
        b[1] = log(a[1])
        b[2] = exp(a[2])
        b[3] = sigmoid(a[3])
        b[4] = power(a[4], a[5])
        b[5] = tanh(a[5])
        b[6] = min(a[4], a[5])
        b[7] = max(a[5], a[6])
        return b

    a8 = tvm.placeholder((8, ), dtype='float32', name='a')
    b8 = intrin_real(a8)
    sch = tvm.create_schedule(b8.op)
    func = tvm.build(sch, [a8, b8])
    assert func
    a = numpy.arange(2, 10).astype('float32')
    tvm_a = tvm.ndarray.array(a)
    tvm_b = tvm.ndarray.array(numpy.zeros((8, ), dtype='float32'))
    b = intrin_real(a)
    func(tvm_a, tvm_b)
    tvm.testing.assert_allclose(b, tvm_b.asnumpy(), rtol=1e-5)

    @script
    def intrin_int(a):
        b = output_tensor((1, ), 'int32')
        b[0] = popcount(a[0])
        return b

    a1 = tvm.placeholder((1, ), dtype='int32')
    b1 = intrin_int(a1)
    sch = tvm.create_schedule(b1.op)
    func = tvm.build(sch, [a1, b1])
    assert func
    a = numpy.array([114514]).astype('int32')
    tvm_a = tvm.ndarray.array(a)
    tvm_b = tvm.ndarray.array(numpy.array([0]).astype('int32'))
    b = intrin_int(a)
    func(tvm_a, tvm_b)
    assert tvm_b.asnumpy()[0] == b[0]

# test non-canonical loops
def test_non_zero():
    @tvm.hybrid.script
    def blur(a):
        b = output_tensor((30, 30), 'float32')
        for i in range(2, 32):
            for j in range(2, 32):
                s = 0.0
                for di in range(3):
                    for dj in range(3):
                        s += a[i-di, j-dj]
                b[i-2, j-2] = s / 9.0
        return b

    a = tvm.placeholder((32, 32), 'float32', 'a')
    func, ins, outs = run_and_check(blur, [a])
    run_and_check(func, ins, outs=outs)

    @tvm.hybrid.script
    def triangle(a, b):
        c = output_tensor((10, 10), dtype='float32')
        for i in range(10):
            for j in range(i, 10):
                c[i, j] = a[i] * b[j]
        return c

    a = tvm.placeholder((10, ), dtype='float32', name='a')
    b = tvm.placeholder((10, ), dtype='float32', name='b')
    func, ins, outs = run_and_check(triangle, [a, b])
    run_and_check(func, ins, outs=outs)

def test_allocate():
    @tvm.hybrid.script
    def blur2d(a):
        b = output_tensor((30, 30), 'float32')
        for i in range(30):
            ha = allocate((3, 30), 'float32')
            for j in range(3):
                for k in range(30):
                    ha[j, k] = a[i+j, k] + a[i+j, k+1] + a[i+j, k+2]
            for j in range(30):
                b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
        return b

    a = tvm.placeholder((32, 32), 'float32', 'a')
    b = blur2d(a)
    sch = tvm.create_schedule(b.op)
    func, ins, outs = run_and_check(blur2d, [a])
    run_and_check(func, ins, outs=outs)

    if tvm.gpu().exist:
        @tvm.hybrid.script
        def share_vec_add(a, b):
            c = output_tensor((256, ), 'float32')
            shared = allocate((256, ), 'float32', 'shared')
            for i in bind("threadIdx.x", 256):
                shared[i] = a[i]
            local = allocate((256, ), 'float32', 'local')
            for i in bind("threadIdx.x", 256):
                local[i] = b[i]
            for i in bind("threadIdx.x", 256):
                c[i] = shared[i] + local[i]
            return c

        a = tvm.placeholder((256, ), dtype='float32', name='a')
        b = tvm.placeholder((256, ), dtype='float32', name='b')
        c = share_vec_add(a, b)
        func, ins, outs = run_and_check(share_vec_add, [a, b], target='cuda')
        run_and_check(func, ins, outs=outs, target='cuda')
    else:
        print('[Warning] No GPU found! Skip shared mem test!')

def test_upstream():
    @tvm.hybrid.script
    def upstream(a):
        b = output_tensor((20, ), 'float32')
        for i in range(20):
            b[i] = a[i] * i
        return b

    a = tvm.placeholder((20, ), 'float32')
    b = tvm.placeholder((20, ), 'float32')
    c = tvm.compute((20, ), lambda x: a[x] + b[x])
    d = upstream(c)
    sch = tvm.create_schedule([c.op, d.op])
    ir = tvm.lower(sch, [a, b, d], simple_mode=True)
    func = tvm.build(sch, [a, b, d])
    assert(func)

    a = numpy.random.randn(20).astype('float32')
    b = numpy.random.randn(20).astype('float32')
    ref = numpy.zeros((20, ), 'float32')
    for i in range(20):
        ref[i] = (a[i] + b[i]) * i
    tvm_a = tvm.nd.array(a)
    tvm_b = tvm.nd.array(b)
    tvm_d = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
    func(tvm_a, tvm_b, tvm_d)
    tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)

def test_downstream():
    @tvm.hybrid.script
    def downstream(a):
        b = output_tensor((20, ), 'float32')
        for i in range(20):
            b[i] = a[i] * i
        return b

    a = tvm.placeholder((20, ), 'float32')
    b = downstream(a)
    c = tvm.compute((20, ), lambda x: b[x] + 1.0)
    sch = tvm.create_schedule(c.op)
    module = tvm.build(sch, [a, c])
    assert module

    a = numpy.random.randn(20).astype('float32')
    ref = numpy.zeros((20, )).astype('float32')
    for i in range(20):
        ref[i] = (a[i] * i) + 1.0
    tvm_a = tvm.nd.array(a)
    tvm_c = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
    module(tvm_a, tvm_c)
    tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)

def test_const_param():
    @tvm.hybrid.script
    def add_something(a, b):
        c = output_tensor((11, ), 'int32')
        for i in range(11):
            c[i] = a[i] + b
        return c

    a = tvm.placeholder((11, ), dtype='int32', name='a')
    b = tvm.const(11, 'int32')
    c = add_something(a, b)
    sch = tvm.create_schedule(c.op)
    module = tvm.build(sch, [a, c], 'llvm')
    assert(module)

    np_a = numpy.arange(11).astype('int32')
    np_b = 11
    np_c = numpy.zeros((11, )).astype('int32')
    nd_a = tvm.ndarray.array(np_a)
    nd_c = tvm.ndarray.array(numpy.zeros((11, )).astype('int32'))
    module(nd_a, nd_c)
    ref = add_something(np_a, 11)
    tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)

def test_value_index():
    @tvm.hybrid.script
    def kernel_a(a):
        b = output_tensor((16, ), 'int32')
        c = output_tensor((4, 4), 'int32')
        for i in range(16):
            b[i] = a[i] + 2
            c[i // 4, i % 4] = a[i] + 1
        return b, c

    @tvm.hybrid.script
    def kernel_b(b, a):
        c = output_tensor((4, 4), 'int32')
        for i in range(4):
            for j in range(4):
                c[i, j] = a[i * 4 + j] * b[i, j]
        return c

    a = tvm.placeholder((16, ), 'int32')
    b, c = kernel_a(a)
    d = kernel_b(c, b)
    sch = tvm.create_schedule(d.op)
    module = tvm.build(sch, [a, d])
    assert module

    np_a = numpy.arange(16).astype('int32')
    np_b, np_c = kernel_a(np_a)
    ref = kernel_b(np_c, np_b)
    res = tvm.ndarray.array(numpy.zeros((4, 4)).astype('int32'))
    module(tvm.ndarray.array(np_a), res)
    tvm.testing.assert_allclose(res.asnumpy(), ref)

def test_func_call():
    @tvm.hybrid.script
    def foo(a, b):
        for i in range(len(a)):
            a[i] = i + 1.0
        for i in range(len(a)):
            b[i] = i + 1.0
        c = outer_product(10, 10, a, b)
        d = output_tensor(c.shape, c.dtype)
        for i in range(10):
            for j in range(10):
                d[i, j] = c[i, j] + i * j
        return d

    a = tvm.placeholder((10, ), name='a')
    b = tvm.placeholder((10, ), name='b')
    func, ins, outs = run_and_check(foo, [a, b])
    run_and_check(func, ins, outs=outs)

def test_bool():
    @tvm.hybrid.script
    def foo(a):
        b = output_tensor(a.shape, a.dtype)
        b[0] = 1.2
        for i in range(1, a.shape[0] - 1):
            if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
                b[i] = a[i]
            else:
                b[i] = 0.0
        return b

    a = tvm.placeholder((10, ), name='a')
    func, ins, outs = run_and_check(foo, [a])
    run_and_check(func, ins, outs=outs)

def test_const_range():
    @tvm.hybrid.script
    def foo(a, b):
        c = output_tensor(a.shape, a.dtype)
        d = output_tensor(a.shape, 'int32')
        for i in const_range(2):
            for j in const_range(5):
                c[i, j] = float32(int32(a[i, j]) + b[i, j])
        for i in const_range(len(b)):
            for j in const_range(len(b[0])):
                d[i, j] = int32(a[i, j] + b[i, j])
        return c, d

    a = tvm.placeholder((2, 5), name='a', dtype='float32')
    b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
    func, ins, outs = run_and_check(foo, [a, b])
    run_and_check(func, ins, outs=outs)

    @tvm.hybrid.script
    def goo(a, b):
        c = output_tensor(a.shape, a.dtype)
        len_b = len(b)
        for i in const_range(len_b * 2):
            if i < len_b:
                c[i] = a[i] + b[i]
            else:
                c[i - len_b] = a[i - len_b] + b[i - len_b]
        return c

    a = tvm.placeholder((5, ), name='a', dtype='int32')
    b = [1, 2, 3, 4, 5]
    c = goo(a, tvm.convert(b))
    sch = tvm.create_schedule(c.op)
    func, ins, outs = run_and_check(goo, [a, b])
    run_and_check(func, ins, outs=outs)

    @tvm.hybrid.script
    def hoo(a, b):
        c = output_tensor(a.shape, a.dtype)
        len_b = len(b)
        for i in range(a.shape[0]):
            for j in const_range(len(b)):
                d = a[i] * b[j]
                d += a[i] + b[j]
                c[i] = d
        return c

    a = tvm.placeholder((5, ), name='a', dtype='int32')
    b = [1, 2, 3, 4, 5]
    func, ins, outs = run_and_check(hoo, [a, b])
    run_and_check(func, ins, outs=outs)

def test_schedule():
    @script
    def outer_product(a, b):
        c = output_tensor((64, 64), a.dtype)
        for i in range(64):
            for j in range(64):
                c[i, j] = a[i] * b[j]
        return c

    a = tvm.placeholder((64,), name='a', dtype='float32')
    b = tvm.placeholder((64,), name='b', dtype='float32')
    c = outer_product(a, b)

    # Test perfect loop split
    # Test loop reorder
    # Test loop annotation
    sch = tvm.create_schedule(c.op)
    i, j = c.op.axis
    io, ii = sch[c].split(i, 4)
    sch[c].parallel(ii)
    jo, ji = sch[c].split(j, 4)
    joo, joi = sch[c].split(jo, 4)
    sch[c].vectorize(ji)
    sch[c].reorder(ii, io, joo, joi, ji)
    ir = tvm.lower(sch, [a, b, c], simple_mode=True)
    assert isinstance(ir, tvm.stmt.ProducerConsumer)
    ir = ir.body
    assert isinstance(ir, tvm.stmt.AttrStmt)
    ir = ir.body
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'i.inner'
    ir = ir.body
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'i.outer'
    ir = ir.body
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'j.outer.outer'
    ir = ir.body
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'j.outer.inner'
    ir = ir.body
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)

    # Test fuse
    sch = tvm.create_schedule(c.op)
    sch[c].fuse(c.op.axis[0], c.op.axis[1])
    ir = tvm.lower(sch, [a, b, c], simple_mode=True)
    assert isinstance(ir, tvm.stmt.ProducerConsumer)
    ir = ir.body
    assert isinstance(ir, tvm.stmt.AttrStmt)
    ir = ir.body
    assert isinstance(ir, tvm.stmt.For)
    assert ir.loop_var.name == 'i.j.fused'
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)

    # Test imperfect loop split
    sch = tvm.create_schedule(c.op)
    sch[c].split(c.op.axis[0], 3)
    ir = tvm.lower(sch, [a, b, c], simple_mode=True)
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)

    # Test loop binds

def test_capture():
    n = 8
    constant_tuple = (10, n)
    constant_list = [[1, 2], [3, n]]
    const_value = 1

    @tvm.hybrid.script
    def add_something(a):
        c = output_tensor((constant_tuple[1],), 'int32')
        for i in range(constant_tuple[1]):
            c[i] = a[i] + constant_list[1][const_value]
        return c

    a = tvm.placeholder((n, ), dtype='int32', name='a')
    func, ins, outs = run_and_check(add_something, [a])
    run_and_check(func, ins, outs=outs)

def test_array_inputs():
    @script
    def sum_array(inputs):
        out = output_tensor((10,), inputs[0].dtype)
        n = len(inputs)
        for i in range(10):
            for j in const_range(n):
                out[i] += inputs[j][i]
        return out

    n = 5
    inputs = []
    for i in range(n):
        inputs.append(tvm.placeholder((10,), name='t%s' % i, dtype='float32'))
    out = sum_array(tvm.convert(inputs))
...
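
Stripped of the TVM plumbing, the run_and_check helper above follows a common testing pattern: compile the function under test, feed it random inputs, run it, and compare the result against a plain-Python reference with assert_allclose. Below is a minimal, framework-free sketch of that same pattern using only NumPy; the helper and function names here are illustrative, not part of TVM's (or avocado's) API.

import numpy

def run_and_check(func, ref_func, shapes, rtol=1e-5, atol=1e-5):
    # Mirror the helper above, minus TVM: generate random inputs, run the
    # function under test and the reference, then compare elementwise.
    args = [numpy.random.randn(*s).astype('float32') for s in shapes]
    out = func(*args)
    ref = ref_func(*args)
    numpy.testing.assert_allclose(out, ref, rtol=rtol, atol=atol)
    return out

def outer_product(a, b):
    # Vectorized implementation under test.
    return numpy.outer(a, b)

def outer_product_ref(a, b):
    # Explicit-loop reference, mirroring the hybrid-script kernel above.
    c = numpy.empty((a.shape[0], b.shape[0]), dtype='float32')
    for i in range(a.shape[0]):
        for j in range(b.shape[0]):
            c[i, j] = a[i] * b[j]
    return c

run_and_check(outer_product, outer_product_ref, [(99,), (101,)])

The random inputs and the shared tolerance arguments are what make the helper reusable across all the test cases above: each test only supplies a kernel and a reference.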


test__refcount.py

Source: test__refcount.py (GitHub)


...
    eventlet.spawn(make_request, addr)
    eventlet.sleep(0.1 + SOCKET_TIMEOUT)
    print('run_interaction: refcount(s.fd)', sys.getrefcount(s.fd))
    return weakref.ref(s.fd)

def run_and_check(run_client):
    w = run_interaction(run_client=run_client)
    # clear_sys_exc_info()
    gc.collect()
    fd = w()
    print('run_and_check: weakref fd:', fd)
    if fd:
        print(pprint.pformat(gc.get_referrers(fd)))
        for x in gc.get_referrers(fd):
            print(pprint.pformat(x))
            for y in gc.get_referrers(x):
                print('- {0}'.format(pprint.pformat(y)))
        raise AssertionError('server should be dead by now')

def test_clean_exit():
    run_and_check(True)
    run_and_check(True)

def test_timeout_exit():
    run_and_check(False)
...
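
This run_and_check exercises a different technique: run_interaction returns only a weak reference to the server socket's file descriptor object, then the helper forces a garbage-collection pass and asserts the object has actually been freed, dumping the referrer chain when it has not. A minimal, self-contained sketch of that weakref-plus-gc idiom follows; the Resource class and helper name are illustrative stand-ins for the socket fd.

import gc
import weakref

class Resource(object):
    pass

def make_resource_weakref():
    r = Resource()          # stand-in for the socket fd in the test above
    return weakref.ref(r)   # the only strong reference dies on return

w = make_resource_weakref()
gc.collect()                # collect any lingering reference cycles
assert w() is None, 'resource should be dead by now'

Calling gc.collect() before checking the weak reference matters: without it, an object kept alive only by a reference cycle would still look "live" and produce a false failure.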


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
