How to use the set_scope method in tempest

Best Python code snippet using tempest_python

test_vta_insn.py

Source:test_vta_insn.py Github

copy

Full Screen

...25 (n, n, env.BATCH, env.BLOCK_OUT),26 lambda *i: y_buf(*i).astype(env.inp_dtype), "y")27 # schedule28 s = tvm.create_schedule(y.op)29 s[x_buf].set_scope(env.acc_scope)30 s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)31 s[y_buf].set_scope(env.acc_scope)32 s[y_buf].pragma(y_buf.op.axis[0], env.alu)33 s[y].pragma(y.op.axis[0], env.dma_copy)34 # verification35 with vta.build_config():36 m = vta.build(s, [x, y], "ext_dev", env.target_host)37 if not remote:38 return39 temp = util.tempdir()40 m.save(temp.relpath("load_act.o"))41 remote.upload(temp.relpath("load_act.o"))42 f = remote.load_module("load_act.o")43 # verify44 ctx = remote.ext_dev(0)45 x_np = np.random.randint(46 1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)47 y_np = x_np.astype(y.dtype)48 x_nd = tvm.nd.array(x_np, ctx)49 y_nd = tvm.nd.empty(y_np.shape, ctx=ctx, dtype=y_np.dtype)50 f(x_nd, y_nd)51 np.testing.assert_equal(y_np, y_nd.asnumpy())52 vta.testing.run(_run)53def test_padded_load():54 """Test padded load."""55 def _run(env, remote):56 # declare57 n = 2158 m = 2059 pad_before = [0, 1, 0, 0]60 pad_after = [1, 3, 0, 0]61 x = tvm.placeholder(62 (n, m, env.BATCH, env.BLOCK_OUT),63 name="x",64 dtype=env.acc_dtype)65 x_buf = topi.nn.pad(x, pad_before, pad_after, name="y")66 # insert no-op that won't be optimized away67 y_buf = tvm.compute((n + pad_before[0] + pad_after[0],68 m + pad_before[1] + pad_after[1],69 env.BATCH,70 env.BLOCK_OUT), lambda *i: x_buf(*i)>>0, "y_buf")71 y = tvm.compute((n + pad_before[0] + pad_after[0],72 m + pad_before[1] + pad_after[1],73 env.BATCH,74 env.BLOCK_OUT), lambda *i: y_buf(*i).astype(env.inp_dtype), "y")75 # schedule76 s = tvm.create_schedule(y.op)77 s[x_buf].set_scope(env.acc_scope)78 s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)79 s[y_buf].set_scope(env.acc_scope)80 s[y_buf].pragma(y_buf.op.axis[0], env.alu)81 s[y].pragma(y.op.axis[0], env.dma_copy)82 # build83 with vta.build_config():84 mod = vta.build(s, [x, y], "ext_dev", env.target_host)85 if not 
remote:86 return87 temp = util.tempdir()88 mod.save(temp.relpath("padded_load.o"))89 remote.upload(temp.relpath("padded_load.o"))90 f = remote.load_module("padded_load.o")91 # verify92 ctx = remote.ext_dev(0)93 x_np = np.random.randint(1, 2, size=(94 n, m, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)95 y_np = np.zeros((n + pad_before[0] + pad_after[0],96 m + pad_before[1] + pad_after[1],97 env.BATCH,98 env.BLOCK_OUT)).astype(y.dtype)99 y_np[pad_before[0]:pad_before[0] + n,100 pad_before[1]:pad_before[1] + m,101 :] = x_np102 x_nd = tvm.nd.array(x_np, ctx)103 y_nd = tvm.nd.empty(y_np.shape, ctx=ctx, dtype=y_np.dtype)104 f(x_nd, y_nd)105 np.testing.assert_equal(y_np, y_nd.asnumpy())106 vta.testing.run(_run)107def test_gemm():108 """Test GEMM."""109 def _run(env, remote):110 # declare111 o = 4112 n = 1113 m = 4114 x = tvm.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="x", dtype=env.inp_dtype)115 w = tvm.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="w", dtype=env.wgt_dtype)116 x_buf = tvm.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: x(*i), "x_buf")117 w_buf = tvm.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: w(*i), "w_buf")118 ko = tvm.reduce_axis((0, n), name="ko")119 ki = tvm.reduce_axis((0, env.BLOCK_IN), name="ki")120 y_gem = tvm.compute(121 (o, m, env.BATCH, env.BLOCK_OUT),122 lambda bo, co, bi, ci:123 tvm.sum(x_buf[bo, ko, bi, ki].astype(env.acc_dtype) *124 w_buf[co, ko, ci, ki].astype(env.acc_dtype),125 axis=[ko, ki]),126 name="y_gem")127 y_shf = tvm.compute(128 (o, m, env.BATCH, env.BLOCK_OUT),129 lambda *i: y_gem(*i)>>8,130 name="y_shf")131 y_max = tvm.compute(132 (o, m, env.BATCH, env.BLOCK_OUT),133 lambda *i: tvm.max(y_shf(*i), 0),134 "y_max") #relu135 y_min = tvm.compute(136 (o, m, env.BATCH, env.BLOCK_OUT),137 lambda *i: tvm.min(y_max(*i), (1<<(env.INP_WIDTH-1))-1),138 "y_min") #relu139 y = tvm.compute(140 (o, m, env.BATCH, env.BLOCK_OUT),141 lambda *i: y_min(*i).astype(env.inp_dtype),142 name="y")143 if not remote:144 return145 
def verify(s):146 mod = vta.build(s, [x, w, y], "ext_dev", env.target_host)147 temp = util.tempdir()148 mod.save(temp.relpath("gemm.o"))149 remote.upload(temp.relpath("gemm.o"))150 f = remote.load_module("gemm.o")151 # verify152 ctx = remote.ext_dev(0)153 x_np = np.random.randint(154 -128, 128, size=(o, n, env.BATCH, env.BLOCK_IN)).astype(x.dtype)155 w_np = np.random.randint(156 -128, 128, size=(m, n, env.BLOCK_OUT, env.BLOCK_IN)).astype(w.dtype)157 y_np = np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(y.dtype)158 x_nd = tvm.nd.array(x_np, ctx)159 w_nd = tvm.nd.array(w_np, ctx)160 y_nd = tvm.nd.array(y_np, ctx)161 y_np = y_np.astype(env.acc_dtype)162 for b in range(o):163 for i in range(m):164 for j in range(n):165 y_np[b,i,:] += np.dot(x_np[b,j,:].astype(env.acc_dtype),166 w_np[i,j].T.astype(env.acc_dtype))167 y_np = np.right_shift(y_np, 8)168 y_np = np.clip(y_np, 0, (1<<(env.INP_WIDTH-1))-1).astype(y.dtype)169 if env.TARGET == "sim":170 simulator.clear_stats()171 f(x_nd, w_nd, y_nd)172 print(simulator.stats())173 else:174 f(x_nd, w_nd, y_nd)175 np.testing.assert_equal(y_np, y_nd.asnumpy())176 def test_schedule1():177 # default schedule with no smt178 s = tvm.create_schedule(y.op)179 # set the scope of the SRAM buffers180 s[x_buf].set_scope(env.inp_scope)181 s[w_buf].set_scope(env.wgt_scope)182 s[y_gem].set_scope(env.acc_scope)183 s[y_shf].set_scope(env.acc_scope)184 s[y_max].set_scope(env.acc_scope)185 s[y_min].set_scope(env.acc_scope)186 # set pragmas for DMA transfer and ALU ops187 s[x_buf].compute_at(s[y_gem], ko)188 s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)189 s[w_buf].compute_at(s[y_gem], ko)190 s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)191 s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)192 s[y_max].pragma(s[y_max].op.axis[0], env.alu)193 s[y_min].pragma(s[y_min].op.axis[0], env.alu)194 s[y].pragma(s[y].op.axis[0], env.dma_copy)195 # tensorization196 s[y_gem].reorder(197 ko,198 s[y_gem].op.axis[0],199 s[y_gem].op.axis[1],200 
s[y_gem].op.axis[2],201 s[y_gem].op.axis[3],202 ki)203 s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)204 verify(s)205 def test_smt():206 # test smt schedule207 s = tvm.create_schedule(y.op)208 s[x_buf].set_scope(env.inp_scope)209 s[w_buf].set_scope(env.wgt_scope)210 s[y_gem].set_scope(env.acc_scope)211 s[y_shf].set_scope(env.acc_scope)212 s[y_max].set_scope(env.acc_scope)213 s[y_min].set_scope(env.acc_scope)214 abo, aco, abi, aci = s[y].op.axis215 abo1, abo2 = s[y].split(abo, nparts=2)216 s[y].bind(abo1, tvm.thread_axis("cthread"))217 s[y_gem].compute_at(s[y], abo1)218 s[y_shf].compute_at(s[y], abo1)219 s[y_max].compute_at(s[y], abo1)220 s[y_min].compute_at(s[y], abo1)221 s[y_gem].reorder(222 ko,223 s[y_gem].op.axis[0],224 s[y_gem].op.axis[1],225 s[y_gem].op.axis[2],226 s[y_gem].op.axis[3],227 ki)228 s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)229 s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)230 s[y_max].pragma(s[y_max].op.axis[0], env.alu)231 s[y_min].pragma(s[y_min].op.axis[0], env.alu)232 s[x_buf].compute_at(s[y_gem], ko)233 s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)234 s[w_buf].compute_at(s[y_gem], ko)235 s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)236 s[y].pragma(abo2, env.dma_copy)237 verify(s)238 test_schedule1()239 test_smt()240 vta.testing.run(_run)241def test_alu():242 def _run(env, remote):243 def check_alu(tvm_op, np_op=None, use_imm=False):244 """Test ALU"""245 m = 8246 n = 8247 imm = np.random.randint(1,5)248 # compute249 a = tvm.placeholder(250 (m, n, env.BATCH, env.BLOCK_OUT),251 name="a",252 dtype=env.acc_dtype)253 a_buf = tvm.compute(254 (m, n, env.BATCH, env.BLOCK_OUT),255 lambda *i: a(*i),256 "a_buf") #DRAM->SRAM257 if use_imm:258 res_buf = tvm.compute(259 (m, n, env.BATCH, env.BLOCK_OUT),260 lambda *i: tvm_op(a_buf(*i), imm),261 "res_buf") #compute262 else:263 b = tvm.placeholder(264 (m, n, env.BATCH, env.BLOCK_OUT),265 name="b",266 dtype=env.acc_dtype)267 b_buf = tvm.compute(268 (m, n, env.BATCH, env.BLOCK_OUT),269 lambda 
*i: b(*i),270 "b_buf") #DRAM->SRAM271 res_buf = tvm.compute(272 (m, n, env.BATCH, env.BLOCK_OUT),273 lambda *i: tvm_op(a_buf(*i), b_buf(*i)),274 "res_buf") #compute5B275 res = tvm.compute(276 (m, n, env.BATCH, env.BLOCK_OUT),277 lambda *i: res_buf(*i).astype(env.inp_dtype),278 "res") #SRAM->DRAM279 # schedule280 s = tvm.create_schedule(res.op)281 s[a_buf].set_scope(env.acc_scope) # SRAM282 s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM283 s[res_buf].set_scope(env.acc_scope) # SRAM284 s[res_buf].pragma(res_buf.op.axis[0], env.alu) # compute285 s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM286 if not use_imm:287 s[b_buf].set_scope(env.acc_scope) # SRAM288 s[b_buf].pragma(b_buf.op.axis[0], env.dma_copy) # DRAM->SRAM289 if not remote:290 return291 # build292 with vta.build_config():293 if use_imm:294 mod = vta.build(s, [a, res], "ext_dev", env.target_host)295 else:296 mod = vta.build(s, [a, b, res], "ext_dev", env.target_host)297 temp = util.tempdir()298 mod.save(temp.relpath("load_act.o"))299 remote.upload(temp.relpath("load_act.o"))300 f = remote.load_module("load_act.o")301 # verify302 ctx = remote.ext_dev(0)303 a_np = np.random.randint(304 -16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)305 if use_imm:306 res_np = np_op(a_np, imm) if np_op else tvm_op(a_np, imm)307 else:308 b_np = np.random.randint(309 -16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(b.dtype)310 res_np = np_op(a_np, b_np) if np_op else tvm_op(a_np, b_np)311 res_np = res_np.astype(res.dtype)312 a_nd = tvm.nd.array(a_np, ctx)313 res_nd = tvm.nd.array(314 np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), ctx)315 if use_imm:316 f(a_nd, res_nd)317 else:318 b_nd = tvm.nd.array(b_np, ctx)319 f(a_nd, b_nd, res_nd)320 np.testing.assert_equal(res_np, res_nd.asnumpy())321 check_alu(lambda x, y: x << y, np.left_shift, use_imm=True)322 check_alu(tvm.max, np.maximum, use_imm=True)323 check_alu(tvm.max, np.maximum)324 check_alu(lambda x, y: x + y, 
use_imm=True)325 check_alu(lambda x, y: x + y)326 check_alu(lambda x, y: x >> y, np.right_shift, use_imm=True)327 vta.testing.run(_run)328def test_relu():329 """Test RELU on ALU"""330 def _run(env, remote):331 m = 8332 n = 10333 # compute334 a = tvm.placeholder(335 (m, n, env.BATCH, env.BLOCK_OUT),336 name="a",337 dtype=env.acc_dtype)338 a_buf = tvm.compute(339 (m, n, env.BATCH, env.BLOCK_OUT),340 lambda *i: a(*i),341 "a_buf") # DRAM->SRAM342 max_buf = tvm.compute(343 (m, n, env.BATCH, env.BLOCK_OUT),344 lambda *i: tvm.max(a_buf(*i), 0),345 "res_buf") # relu346 min_buf = tvm.compute(347 (m, n, env.BATCH, env.BLOCK_OUT),348 lambda *i: tvm.min(max_buf(*i), (1<<(env.INP_WIDTH-1))-1),349 "max_buf") # relu350 res = tvm.compute(351 (m, n, env.BATCH, env.BLOCK_OUT),352 lambda *i: min_buf(*i).astype(env.inp_dtype),353 "min_buf") # SRAM->DRAM354 # schedule355 s = tvm.create_schedule(res.op)356 s[a_buf].set_scope(env.acc_scope) # SRAM357 s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM358 s[max_buf].set_scope(env.acc_scope) # SRAM359 s[min_buf].set_scope(env.acc_scope) # SRAM360 s[max_buf].pragma(max_buf.op.axis[0], env.alu) # compute361 s[min_buf].pragma(min_buf.op.axis[0], env.alu) # compute362 s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM363 # build364 with vta.build_config():365 mod = vta.build(s, [a, res], "ext_dev", env.target_host)366 if not remote:367 return368 temp = util.tempdir()369 mod.save(temp.relpath("load_act.o"))370 remote.upload(temp.relpath("load_act.o"))371 f = remote.load_module("load_act.o")372 # verify373 ctx = remote.ext_dev(0)374 a_np = np.random.randint(375 -256, 256, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)376 res_np = np.clip(a_np, 0, (1<<(env.INP_WIDTH-1))-1).astype(res.dtype)377 a_nd = tvm.nd.array(a_np, ctx)378 res_nd = tvm.nd.array(379 np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), ctx)380 f(a_nd, res_nd)381 np.testing.assert_equal(res_np, res_nd.asnumpy())382 vta.testing.run(_run)383def 
test_shift_and_scale():384 """Test shift and scale on ALU"""385 def _run(env, remote):386 m = 2387 n = 8388 imm_shift = np.random.randint(0,8)389 imm_scale = np.random.randint(1,5)390 # compute391 a = tvm.placeholder(392 (m, n, env.BATCH, env.BLOCK_OUT),393 name="a", dtype=env.acc_dtype)394 a_buf = tvm.compute(395 (m, n, env.BATCH, env.BLOCK_OUT),396 lambda *i: a(*i),397 "a_buf") # DRAM->SRAM398 res_shift = tvm.compute(399 (m, n, env.BATCH, env.BLOCK_OUT),400 lambda *i: a_buf(*i)+imm_shift,401 "res_shift") # compute402 res_scale = tvm.compute(403 (m, n, env.BATCH, env.BLOCK_OUT),404 lambda *i: res_shift(*i)>>imm_scale,405 "res_scale") # compute406 res = tvm.compute(407 (m, n, env.BATCH, env.BLOCK_OUT),408 lambda *i: res_scale(*i).astype(env.inp_dtype),409 "res") # SRAM->DRAM410 # schedule411 s = tvm.create_schedule(res.op)412 s[a_buf].set_scope(env.acc_scope) # SRAM413 s[res_shift].set_scope(env.acc_scope) # SRAM414 s[res_scale].set_scope(env.acc_scope) # SRAM415 s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM416 s[res_shift].pragma(res_shift.op.axis[0], env.alu) # compute417 s[res_scale].pragma(res_scale.op.axis[0], env.alu) # compute418 s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM419 # build420 mod = vta.build(s, [a, res], "ext_dev", env.target_host)421 if not remote:422 return423 temp = util.tempdir()424 mod.save(temp.relpath("load_act.o"))425 remote.upload(temp.relpath("load_act.o"))426 f = remote.load_module("load_act.o")427 # verify428 ctx = remote.ext_dev(0)...

Full Screen

Full Screen

datastructures.py

Source:datastructures.py Github

copy

Full Screen

...20 def __init__(self):21 self.scope = None22 pass23 @abstractmethod24 def set_scope(self, scope):25 self.scope = scope26 @abstractmethod27 def execute(self):28 pass29 @abstractmethod30 def __str__(self):31 return ""32class Constant(Node):33 def __init__(self, value):34 self.value = value35 super().__init__()36 def set_scope(self, scope):37 super().set_scope(scope)38 def execute(self):39 return self.value40 def __str__(self):41 return f"Constant({self.value})"42class VariableDecleration(Node):43 def __init__(self, name: str, value: Node):44 self.name = name45 self.value = value46 super().__init__()47 def execute(self):48 value = self.value.execute()49 prop = Property(self.name, value)50 self.scope.add_property(prop)51 return value52 def __str__(self):53 return f"VarDec({self.name},{self.value})"54class Variable(Node):55 def __init__(self, name, value=None, mode=READ):56 self.name = name57 self.mode = mode58 self.value = value59 super().__init__()60 def set_scope(self, scope):61 super().set_scope(scope)62 if self.value:63 self.value.set_scope(scope)64 def execute(self):65 prop = self.get()66 if not prop:67 raise CommandError(f"{self.name} not defined")68 if self.mode == WRITE:69 value = self.value.execute()70 return prop.set(value)71 return prop.get()72 def get(self):73 return self.scope.find_property(self.name)74 def __str__(self):75 return f"VarAccess({self.name}, {self.mode}, {self.value})"76class Attribute(Node):77 def __init__(self, name, attribute, value=None, mode=READ):78 self.name = name79 self.attribute = attribute80 self.value = value81 self.mode = mode82 super().__init__()83 def set_scope(self, scope):84 super().set_scope(scope)85 new_scope = Scope()86 new_scope.add_property(Property(self.attribute.name, self))87 self.attribute.set_scope(new_scope)88 if self.value:89 self.value.set_scope(scope)90 def get(self):91 prop = self.scope.find_property(self.name)92 parent = prop.get()93 if isinstance(parent, Attribute):94 root = parent.get()95 if self.name not 
in root.__dict__:96 raise CommandError() # TODO proper message error97 return root.__getattribute__(self.name)98 return parent99 def set(self, value):100 if isinstance(self.attribute, Variable):101 parent = self.get()102 attribute = self.attribute.name103 if attribute not in parent.__dict__:104 raise CommandError() # TODO proper message105 parent.__setattr__(attribute, value)106 return value107 return self.attribute.set(value)108 def execute(self):109 if self.mode == WRITE:110 value = self.value.execute()111 self.set(value)112 return value113 return self.get()114 def __str__(self):115 return f"AttrAccess({self.name}, {self.attribute}, {self.mode}, {self.value})"116class CodeBlock(Node):117 def __init__(self, instructions):118 self.instructions = instructions119 super().__init__()120 def set_scope(self, scope):121 self.scope = Scope(parent=scope)122 for instr in self.instructions:123 instr.set_scope(self.scope)124 def execute(self):125 ret = None126 for instr in self.instructions:127 ret = instr.execute()128 return ret129 def __str__(self):130 code = "\n".join([str(c) for c in self.instructions])131 return "{\n" + code + "\n}"132class IfElse(Node):133 def __init__(self, condition, if_block, else_block=None):134 self.condition = condition135 self.if_block = if_block136 self.else_block = else_block137 super().__init__()138 def set_scope(self, scope):139 super().set_scope(scope)140 self.condition.set_scope(scope)141 self.if_block.set_scope(scope)142 if self.else_block:143 self.else_block.set_scope(scope)144 def execute(self):145 if self.condition.execute():146 return self.if_block.execute()147 elif self.else_block:148 return self.else_block.execute()149 def __str__(self):150 l1 = f"if ({self.condition})"151 l2 = str(self.if_block)152 l3 = ("else\n" + str(self.else_block)) if self.else_block else ""153 return "\n".join([l1, l2, l3])154class While(Node):155 def __init__(self, condition, block):156 self.condition = condition157 self.loop = block158 super().__init__()159 
def set_scope(self, scope):160 super().set_scope(scope)161 self.condition.set_scope(scope)162 self.loop.set_scope(scope)163 def execute(self):164 while self.condition.execute():165 self.loop.execute()166 def __str__(self):167 l1 = f"while ({self.condition})"168 l2 = str(self.loop)169 return l1 + "\n" + l2170class FunctionDecleration(Node):171 def __init__(self, name, args, code):172 self.name = name173 self.args = args174 self.code = code175 super().__init__()176 def set_scope(self, scope):177 self.code.set_scope(scope)178 def run(self, *args):179 if len(args) != len(self.args):180 raise Exception181 scope = self.code.scope182 if not scope:183 raise Exception184 for name, value in zip(self.args, args):185 scope.add_property(Property(name, value))186 self.code.set_scope(scope)187 self.code.execute()188 def execute(self):189 functions_list.add_property(Property(self.name, self.run))190 def __str__(self):191 return f"FunDecl({self.name}, {self.args}, {self.code})"192class FunctionCall(Node):193 def __init__(self, name, args=[]):194 self.name = name195 self.args = args196 super().__init__()197 def set_scope(self, scope):198 super().set_scope(scope)199 for arg in self.args:200 arg.set_scope(scope)201 def execute(self):202 fun = functions_list.find_property(self.name)203 args = [p.execute() for p in self.args]204 if fun:205 fun.get()(*args)206 def __str__(self):207 return f"FunCall({self.name}, {self.args})"208class Program(Node):209 def __init__(self, commands):210 self.commands = commands211 super().__init__()212 def execute(self):213 for c in self.commands:214 c.execute()215 def set_scope(self, scope):216 super().set_scope(scope)217 for c in self.commands:218 c.set_scope(scope)219 def __str__(self):220 return "\n".join([str(c) for c in self.commands])221class BinaryOp(Node):222 def __init__(self, op):223 self.op = op224 self.x = None225 self.y = None226 super().__init__()227 def set_scope(self, scope):228 super().set_scope(scope)229 self.x.set_scope(scope)230 
self.y.set_scope(scope)231 def execute(self):232 return self.op(self.x.execute(), self.y.execute())233 def __str__(self):...

Full Screen

Full Screen

Student.py

Source:Student.py Github

copy

Full Screen

...15 def age(self, age):16 self.age = age17def set_age(self, age):18 self.age = age19def set_scope(self, scope):20 self.scope = scope21def dynamic_demo():22 # 动态绑定属性23 a = Student()24 a.name = 'Alice'25 print(a.name)26 # __slots__定义的属性仅对当前类实例起作用,对继承的子类是不起作用的27 JuniorStudent.set_scope = MethodType(set_scope, JuniorStudent)28 d = JuniorStudent()29 d.set_scope(99)30 print(d.scope)31 # 动态绑定方法32 a.set_age = MethodType(set_age, a)33 a.set_age(26)34 print(a.age)35 # 给一个实例绑定的方法 对另一个实例是不起作用的36 # b = Student()37 # b.set_age(36)38 # print(b.age)39 # 可以给class绑定方法 所有实例均可调用40 # 由于'score'没有被放到__slots__中,41 # 所以不能绑定score属性42 Student.set_scope = MethodType(set_scope, Student)43 c = Student()44 c.set_scope(99)45 print(c.scope)46# 定制类47class Fib(object):48 def __init__(self):49 self.a = 050 self.b = 151 def __iter__(self):52 return self53 def __next__(self):54 self.a, self.b = self.b, self.a + self.b55 if self.a > 1000:56 return StopIteration()57 return self.a58 def __getitem__(self, n):...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run tempest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing for FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful