How to use multi_scale_search method in Airtest

Best Python code snippet using Airtest

play.py

Source:play.py Github

copy

Full Screen

# play.py — reconstructed from the scraped snippet: the web page collapsed
# this code onto one line and fused its display line numbers into the text;
# the numbers have been stripped and conventional formatting restored.
import os
import glob
import shutil
import time

import cv2
import numpy as np


def multi_scale_search(pivot, screen, range=0.3, num=10):
    """Locate template `pivot` inside `screen` by multi-scale template matching.

    The screen is resized to `num` scales spread over [1 - range, 1 + range]
    (largest first) and matched with cv2.TM_CCOEFF_NORMED; the best-scoring
    scale across all attempts wins.

    Returns:
        [start_h, start_w, end_h, end_w, score] mapped back to the original
        screen coordinates, or the tuple (0, 0, 0, 0, 0) when the screen is
        smaller than the template at every scale tried.

    NOTE(review): the parameter named `range` shadows the builtin; kept
    unchanged for interface compatibility with existing callers.
    """
    H, W = screen.shape[:2]
    h, w = pivot.shape[:2]
    found = None
    for scale in np.linspace(1 - range, 1 + range, num)[::-1]:
        resized = cv2.resize(screen, (int(W * scale), int(H * scale)))
        # Factor mapping coordinates in the resized image back to `screen`.
        r = W / float(resized.shape[1])
        if resized.shape[0] < h or resized.shape[1] < w:
            # Screen now smaller than the template; every remaining scale is
            # smaller still (descending order), so stop searching.
            break
        res = cv2.matchTemplate(resized, pivot, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= res.max())   # location(s) of the maximum score
        pos_h, pos_w = list(zip(*loc))[0]  # first (row, col) of the maximum
        if found is None or res.max() > found[-1]:
            found = (pos_h, pos_w, r, res.max())
    if found is None:
        return (0, 0, 0, 0, 0)
    pos_h, pos_w, r, score = found
    start_h, start_w = int(pos_h * r), int(pos_w * r)
    end_h, end_w = int((pos_h + h) * r), int((pos_w + w) * r)
    return [start_h, start_w, end_h, end_w, score]


class WechatAutoJump(object):
    """Automates the WeChat 'jump' mini-game from screenshots.

    Screenshots are normalized to 720x1280; the player and the next platform
    are located via template matching (or a fast color heuristic).
    """

    def __init__(self, sensitivity, debug, resource_dir):
        self.sensitivity = sensitivity
        self.debug = debug                # falsy, or a directory for debug dumps
        self.resource_dir = resource_dir
        self.bb_size = [300, 300]         # search box around the mirrored player position
        self.step = 1
        self.load_resource()
        if self.debug:
            if not os.path.exists(self.debug):
                os.mkdir(self.debug)

    def load_resource(self):
        # Grayscale templates: the player sprite plus every platform template.
        self.player = cv2.imread(os.path.join(self.resource_dir, 'player.png'), 0)
        circle_file = glob.glob(os.path.join(self.resource_dir, 'circle/*.png'))
        table_file = glob.glob(os.path.join(self.resource_dir, 'table/*.png'))
        self.jump_file = [cv2.imread(name, 0) for name in circle_file + table_file]

    def get_current_state(self):
        """Read screenshot 'stateNNN.png' for the current step and normalize
        it to exactly 720x1280 (crop or pad rows as needed)."""
        pic_filename = 'state{:03d}.png'.format(self.step)
        state = cv2.imread(pic_filename)
        self.resolution = state.shape[:2]
        scale = state.shape[1] / 720.
        state = cv2.resize(state, (720, int(state.shape[0] / scale)),
                           interpolation=cv2.INTER_NEAREST)
        if state.shape[0] > 1280:
            # Too tall after width normalization: center-crop to 1280 rows.
            s = (state.shape[0] - 1280) // 2
            state = state[s:(s + 1280), :, :]
        elif state.shape[0] < 1280:
            # Too short: pad top and bottom with white rows.
            s1 = (1280 - state.shape[0]) // 2
            s2 = (1280 - state.shape[0]) - s1
            pad1 = 255 * np.ones((s1, 720, 3), dtype=np.uint8)
            pad2 = 255 * np.ones((s2, 720, 3), dtype=np.uint8)
            state = np.concatenate((pad1, state, pad2), 0)
        return state

    def get_player_position(self, state):
        """Return the player's (row, col) as a numpy array."""
        state = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)
        pos = multi_scale_search(self.player, state, 0.3, 10)
        # Weight the match box strongly toward its bottom edge (the feet).
        h, w = int((pos[0] + 13 * pos[2]) / 14.), (pos[1] + pos[3]) // 2
        return np.array([h, w])

    def get_target_position(self, state, player_pos):
        """Return the next platform's center (row, col) as a numpy array."""
        state = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)
        # The next platform lies roughly point-symmetric to the player about
        # the screen center; search a bb_size box around that mirror point.
        sym_center = [1280, 720] - player_pos
        sym_tl = np.maximum([0, 0],
                            sym_center + np.array([-self.bb_size[0] // 2,
                                                   -self.bb_size[1] // 2]))
        # BUGFIX: the scraped original computed the column bound from
        # sym_center[0] (a row coordinate); it must come from sym_center[1].
        sym_br = np.array([min(sym_center[0] + self.bb_size[0] // 2, player_pos[0]),
                           min(sym_center[1] + self.bb_size[1] // 2, 720)])
        state_cut = state[sym_tl[0]:sym_br[0], sym_tl[1]:sym_br[1]]
        target_pos = None
        for target in self.jump_file:
            pos = multi_scale_search(target, state_cut, 0.4, 15)
            if target_pos is None or pos[-1] > target_pos[-1]:
                target_pos = pos
        # Center of the best match, converted back to full-image coordinates.
        return np.array([(target_pos[0] + target_pos[2]) // 2,
                         (target_pos[1] + target_pos[3]) // 2]) + sym_tl

    def get_target_position_fast(self, state, player_pos):
        """Fast path: locate the platform's top marker by its exact color
        (BGR 245, 245, 245) above the player.

        NOTE(review): the scraped snippet was truncated inside this method;
        the tail below is reconstructed from the parallel main.py snippet on
        the same page — TODO confirm against the upstream repository.
        """
        state_cut = state[:player_pos[0], :, :]
        m1 = (state_cut[:, :, 0] == 245)
        m2 = (state_cut[:, :, 1] == 245)
        m3 = (state_cut[:, :, 2] == 245)
        m = np.uint8(np.float32(m1 * m2 * m3) * 255)
        b1, b2 = cv2.connectedComponents(m)
        for i in range(1, np.max(b2) + 1):
            x, y = np.where(b2 == i)
            # The marker blob has a known, narrow pixel-count range.
            if 280 < len(x) < 310:
                r_x, r_y = x, y
        h, w = int(r_x.mean()), int(r_y.mean())
        return np.array([h, w])

Full Screen

Full Screen

main.py

Source:main.py Github

copy

Full Screen

# main.py — reconstructed from the scraped snippet: the web page collapsed
# this code onto one line and fused its display line numbers into the text;
# the numbers have been stripped and conventional formatting restored.
import os
import glob
import shutil
import random

import cv2
import numpy as np


def multi_scale_search(pivot, screen, range=0.3, num=10):
    """Locate template `pivot` inside `screen` by multi-scale template matching.

    The screen is resized to `num` scales spread over [1 - range, 1 + range]
    (largest first) and matched with cv2.TM_CCOEFF_NORMED; the best-scoring
    scale across all attempts wins.

    Returns:
        [start_h, start_w, end_h, end_w, score] in original screen
        coordinates, or (0, 0, 0, 0, 0) when no scale could be searched.

    NOTE(review): the parameter named `range` shadows the builtin; kept
    unchanged for interface compatibility with existing callers.
    """
    H, W = screen.shape[:2]
    h, w = pivot.shape[:2]
    found = None
    for scale in np.linspace(1 - range, 1 + range, num)[::-1]:
        resized = cv2.resize(screen, (int(W * scale), int(H * scale)))
        # Factor mapping coordinates in the resized image back to `screen`.
        r = W / float(resized.shape[1])
        if resized.shape[0] < h or resized.shape[1] < w:
            # Remaining scales are smaller still; no match is possible.
            break
        res = cv2.matchTemplate(resized, pivot, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= res.max())
        pos_h, pos_w = list(zip(*loc))[0]  # first (row, col) of the maximum
        if found is None or res.max() > found[-1]:
            found = (pos_h, pos_w, r, res.max())
    if found is None:
        return (0, 0, 0, 0, 0)
    pos_h, pos_w, r, score = found
    start_h, start_w = int(pos_h * r), int(pos_w * r)
    end_h, end_w = int((pos_h + h) * r), int((pos_w + w) * r)
    return [start_h, start_w, end_h, end_w, score]


class wechat_jump(object):
    """Automates the WeChat 'jump' mini-game from a pulled device screenshot."""

    def __init__(self):
        self.resource_dir = "./resources"
        self.sensitivity = 2.045
        self.bb_size = [300, 300]  # search box around the mirrored player position
        self.load_resources()

    def load_resources(self):
        # Grayscale templates: the player sprite plus every platform template.
        self.player = cv2.imread(os.path.join(self.resource_dir + '/position/player.png'), 0)
        circle_file = glob.glob(os.path.join(self.resource_dir + '/position/circle/*.png'))
        table_file = glob.glob(os.path.join(self.resource_dir + '/position/table/*.png'))
        self.jump_file = [cv2.imread(name, 0) for name in circle_file + table_file]

    def get_player_position(self, state):
        """Return the player's (row, col) as a numpy array."""
        state = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)
        pos = multi_scale_search(self.player, state, 0.3, 10)
        # Weight the match box strongly toward its bottom edge (the feet).
        h, w = int((pos[0] + 13 * pos[2]) / 14.), (pos[1] + pos[3]) // 2
        return np.array([h, w])

    def get_target_position_fast(self, state, player_pos):
        """Fast path: locate the platform's top marker by its exact color
        (BGR 245, 245, 245) above the player."""
        state_cut = state[:player_pos[0], :, :]
        m1 = (state_cut[:, :, 0] == 245)
        m2 = (state_cut[:, :, 1] == 245)
        m3 = (state_cut[:, :, 2] == 245)
        m = np.uint8(np.float32(m1 * m2 * m3) * 255)
        b1, b2 = cv2.connectedComponents(m)
        for i in range(1, np.max(b2) + 1):
            x, y = np.where(b2 == i)
            # print('fast', len(x))
            if 280 < len(x) < 310:
                # BUGFIX: the scraped text read "r_x = []" then "r_y = x, y",
                # which would crash on r_x.mean() below; the intended tuple
                # assignment is restored.
                r_x, r_y = x, y
        h, w = int(r_x.mean()), int(r_y.mean())
        return np.array([h, w])

    def get_target_position(self, state, player_pos):
        """Return the next platform's center (row, col) as a numpy array."""
        state = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)
        # The next platform lies roughly point-symmetric to the player about
        # the screen center; search a bb_size box around that mirror point.
        sym_center = [1280, 720] - player_pos
        sym_tl = np.maximum([0, 0],
                            sym_center + np.array([-self.bb_size[0] // 2,
                                                   -self.bb_size[1] // 2]))
        # BUGFIX: the scraped original computed the column bound from
        # sym_center[0] (a row coordinate); it must come from sym_center[1].
        sym_br = np.array([min(sym_center[0] + self.bb_size[0] // 2, player_pos[0]),
                           min(sym_center[1] + self.bb_size[1] // 2, 720)])
        state_cut = state[sym_tl[0]:sym_br[0], sym_tl[1]:sym_br[1]]
        target_pos = None
        for target in self.jump_file:
            pos = multi_scale_search(target, state_cut, 0.4, 15)
            if target_pos is None or pos[-1] > target_pos[-1]:
                target_pos = pos
        # Center of the best match, converted back to full-image coordinates.
        return np.array([(target_pos[0] + target_pos[2]) // 2,
                         (target_pos[1] + target_pos[3]) // 2]) + sym_tl

    def get_state(self):
        """Load the screenshot from disk and normalize it to 720x1280."""
        # state image
        # os.system('adb shell screencap -p /sdcard/state.png')
        # os.system('adb pull /sdcard/state.png ' + self.resource_dir + '/screen/state.png')
        state = cv2.imread(self.resource_dir + '/screen/state.png')
        self.resolution = state.shape[:2]
        scale = state.shape[1] / 720.0
        state = cv2.resize(state, (720, int(state.shape[0] / scale)),
                           interpolation=cv2.INTER_NEAREST)
        if state.shape[0] > 1280:
            # Too tall: keep the bottom 1280 rows.
            s = state.shape[0] - 1280
            state = state[s:, :, :]
        # NOTE(review): the scraped snippet was truncated here; a
        # shorter-than-1280 screen is presumably padded with white rows as in
        # the sibling play.py snippet — TODO confirm against upstream.
        elif state.shape[0] < 1280:
            s1 = (1280 - state.shape[0]) // 2
            s2 = (1280 - state.shape[0]) - s1
            pad1 = 255 * np.ones((s1, 720, 3), dtype=np.uint8)
            pad2 = 255 * np.ones((s2, 720, 3), dtype=np.uint8)
            state = np.concatenate((pad1, state, pad2), 0)
        return state

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Airtest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful