How to use _find_homography method in Airtest

Best Python code snippet using Airtest

sift.py

Source:sift.py Github

copy

Full Screen

...134 """特征点匹配点对数目>=4个,可使用单矩阵映射,求出识别的目标区域."""135 sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(136 -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)137 # M是转化矩阵138 M, mask = _find_homography(sch_pts, img_pts)139 matches_mask = mask.ravel().tolist()140 # 从good中间筛选出更精确的点(假设good中大部分点为正确的,由ratio=0.7保障)141 selected = [v for k, v in enumerate(good) if matches_mask[k]]142 # 针对所有的selected点再次计算出更精确的转化矩阵M来143 sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in selected]).reshape(144 -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in selected]).reshape(-1, 1, 2)145 M, mask = _find_homography(sch_pts, img_pts)146 # 计算四个角矩阵变换后的坐标,也就是在大图中的目标区域的顶点坐标:147 h, w = im_search.shape[:2]148 h_s, w_s = im_source.shape[:2]149 pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)150 dst = cv2.perspectiveTransform(pts, M)151 # trans numpy arrary to python list: [(a, b), (a1, b1), ...]152 def cal_rect_pts(dst):153 return [tuple(npt[0]) for npt in dst.astype(int).tolist()]154 pypts = cal_rect_pts(dst)155 # 注意:虽然4个角点有可能越出source图边界,但是(根据精确化映射单映射矩阵M线性机制)中点不会越出边界156 lt, br = pypts[0], pypts[2]157 middle_point = int((lt[0] + br[0]) / 2), int((lt[1] + br[1]) / 2)158 # 考虑到算出的目标矩阵有可能是翻转的情况,必须进行一次处理,确保映射后的“左上角”在图片中也是左上角点:159 x_min, x_max = min(lt[0], br[0]), max(lt[0], br[0])160 y_min, y_max = min(lt[1], br[1]), max(lt[1], br[1])161 # 挑选出目标矩形区域可能会有越界情况,越界时直接将其置为边界:162 # 超出左边界取0,超出右边界取w_s-1,超出下边界取0,超出上边界取h_s-1163 # 当x_min小于0时,取0。 x_max小于0时,取0。164 x_min, x_max = int(max(x_min, 0)), int(max(x_max, 0))165 # 当x_min大于w_s时,取值w_s-1。 x_max大于w_s-1时,取w_s-1。166 x_min, x_max = int(min(x_min, w_s - 1)), int(min(x_max, w_s - 1))167 # 当y_min小于0时,取0。 y_max小于0时,取0。168 y_min, y_max = int(max(y_min, 0)), int(max(y_max, 0))169 # 当y_min大于h_s时,取值h_s-1。 y_max大于h_s-1时,取h_s-1。170 y_min, y_max = int(min(y_min, h_s - 1)), int(min(y_max, h_s - 1))171 # 
目标区域的角点,按左上、左下、右下、右上点序:(x_min,y_min)(x_min,y_max)(x_max,y_max)(x_max,y_min)172 pts = np.float32([[x_min, y_min], [x_min, y_max], [173 x_max, y_max], [x_max, y_min]]).reshape(-1, 1, 2)174 pypts = cal_rect_pts(pts)175 return middle_point, pypts, [x_min, x_max, y_min, y_max, w, h]176def _two_good_points(pts_sch1, pts_sch2, pts_src1, pts_src2, im_search, im_source):177 """返回两对匹配特征点情形下的识别结果."""178 # 先算出中心点(在im_source中的坐标):179 middle_point = [int((pts_src1[0] + pts_src2[0]) / 2), int((pts_src1[1] + pts_src2[1]) / 2)]180 pypts = []181 # 如果特征点同x轴或同y轴(无论src还是sch中),均不能计算出目标矩形区域来,此时返回值同good=1情形182 if pts_sch1[0] == pts_sch2[0] or pts_sch1[1] == pts_sch2[1] or pts_src1[0] == pts_src2[0] or pts_src1[1] == pts_src2[1]:183 confidence = ONE_POINT_CONFI184 one_match = generate_result(middle_point, pypts, confidence)185 return one_match186 # 计算x,y轴的缩放比例:x_scale、y_scale,从middle点扩张出目标区域:(注意整数计算要转成浮点数结果!)187 h, w = im_search.shape[:2]188 h_s, w_s = im_source.shape[:2]189 x_scale = abs(1.0 * (pts_src2[0] - pts_src1[0]) / (pts_sch2[0] - pts_sch1[0]))190 y_scale = abs(1.0 * (pts_src2[1] - pts_src1[1]) / (pts_sch2[1] - pts_sch1[1]))191 # 得到scale后需要对middle_point进行校正,并非特征点中点,而是映射矩阵的中点。192 sch_middle_point = int((pts_sch1[0] + pts_sch2[0]) / 2), int((pts_sch1[1] + pts_sch2[1]) / 2)193 middle_point[0] = middle_point[0] - int((sch_middle_point[0] - w / 2) * x_scale)194 middle_point[1] = middle_point[1] - int((sch_middle_point[1] - h / 2) * y_scale)195 middle_point[0] = max(middle_point[0], 0) # 超出左边界取0 (图像左上角坐标为0,0)196 middle_point[0] = min(middle_point[0], w_s - 1) # 超出右边界取w_s-1197 middle_point[1] = max(middle_point[1], 0) # 超出上边界取0198 middle_point[1] = min(middle_point[1], h_s - 1) # 超出下边界取h_s-1199 # 计算出来rectangle角点的顺序:左上角->左下角->右下角->右上角, 注意:暂不考虑图片转动200 # 超出左边界取0, 超出右边界取w_s-1, 超出下边界取0, 超出上边界取h_s-1201 x_min, x_max = int(max(middle_point[0] - (w * x_scale) / 2, 0)), int(202 min(middle_point[0] + (w * x_scale) / 2, w_s - 1))203 y_min, y_max = int(max(middle_point[1] - (h * y_scale) / 2, 0)), 
int(204 min(middle_point[1] + (h * y_scale) / 2, h_s - 1))205 # 目标矩形的角点按左上、左下、右下、右上的点序:(x_min,y_min)(x_min,y_max)(x_max,y_max)(x_max,y_min)206 pts = np.float32([[x_min, y_min], [x_min, y_max], [x_max, y_max], [x_max, y_min]]).reshape(-1, 1, 2)207 for npt in pts.astype(int).tolist():208 pypts.append(tuple(npt[0]))209 return middle_point, pypts, [x_min, x_max, y_min, y_max, w, h]210def _find_homography(sch_pts, src_pts):211 """多组特征点对时,求取单向性矩阵."""212 try:213 M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)214 except Exception:215 import traceback216 traceback.print_exc()217 raise HomographyError("OpenCV error in _find_homography()...")218 else:219 if mask is None:220 raise HomographyError("In _find_homography(), find no mask...")221 else:222 return M, mask223def _target_error_check(w_h_range):224 """校验识别结果区域是否符合常理."""225 x_min, x_max, y_min, y_max, w, h = w_h_range226 tar_width, tar_height = x_max - x_min, y_max - y_min227 # 如果src_img中的矩形识别区域的宽和高的像素数<5,则判定识别失效。认为提取区域待不可能小于5个像素。(截图一般不可能小于5像素)228 if tar_width < 5 or tar_height < 5:229 raise SiftResultCheckError("In src_image, Taget area: width or height < 5 pixel.")230 # 如果矩形识别区域的宽和高,与sch_img的宽高差距超过5倍(屏幕像素差不可能有5倍),认定为识别错误。231 if tar_width < 0.2 * w or tar_width > 5 * w or tar_height < 0.2 * h or tar_height > 5 * h:232 raise SiftResultCheckError("Target area is 5 times bigger or 0.2 times smaller than sch_img.")233def _cal_sift_confidence(im_search, resize_img, rgb=False):234 if rgb:...

Full Screen

Full Screen

sift_test.py

Source:sift_test.py Github

copy

Full Screen

...44good = good_diff45# print(good)46# for i in good:47# print(i.distance)48def _find_homography(sch_pts, src_pts):49 """多组特征点对时,求取单向性矩阵."""50 try:51 M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)52 except Exception:53 import traceback54 traceback.print_exc()55 raise Exception("OpenCV error in _find_homography()...")56 else:57 if mask is None:58 raise Exception("In _find_homography(), find no mask...")59 else:60 return M, mask61def _many_good_pts(im_source, im_search, kp_sch, kp_src, good):62 """特征点匹配点对数目>=4个,可使用单矩阵映射,求出识别的目标区域."""63 sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(64 -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)65 # M是转化矩阵66 M, mask = _find_homography(sch_pts, img_pts)67 matches_mask = mask.ravel().tolist()68 # 从good中间筛选出更精确的点(假设good中大部分点为正确的,由ratio=0.7保障)69 selected = [v for k, v in enumerate(good) if matches_mask[k]]70 # 针对所有的selected点再次计算出更精确的转化矩阵M来71 sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in selected]).reshape(72 -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in selected]).reshape(-1, 1, 2)73 M, mask = _find_homography(sch_pts, img_pts)74 # print(M, mask)75 # 计算四个角矩阵变换后的坐标,也就是在大图中的目标区域的顶点坐标:76 h, w = im_search.shape[:2]77 h_s, w_s = im_source.shape[:2]78 pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)79 dst = cv2.perspectiveTransform(pts, M)80 # trans numpy arrary to python list: [(a, b), (a1, b1), ...]81 def cal_rect_pts(dst):82 return [tuple(npt[0]) for npt in dst.astype(int).tolist()]83 pypts = cal_rect_pts(dst)84 # 注意:虽然4个角点有可能越出source图边界,但是(根据精确化映射单映射矩阵M线性机制)中点不会越出边界85 lt, br = pypts[0], pypts[2]86 middle_point = int((lt[0] + br[0]) / 2), int((lt[1] + br[1]) / 2)87 # 考虑到算出的目标矩阵有可能是翻转的情况,必须进行一次处理,确保映射后的“左上角”在图片中也是左上角点:...

Full Screen

Full Screen

homography.py

Source:homography.py Github

copy

Full Screen

...6 def __init__(self, matcher: cv2.DescriptorMatcher, neighbours: int = 3):7 self.matcher = matcher8 self.matches: List[cv2.DMatch] = list()9 self.neighbours = neighbours10 def _find_homography(self,11 kp_frame: List[cv2.KeyPoint],12 kp_marker: List[cv2.KeyPoint],13 matches: List[cv2.DMatch],14 threshold: float = 5.001,15 maxiters: int = 10000000) -> Array[float]:16 src_pts = np.float32([kp_marker[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)17 dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)18 M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,19 ransacReprojThreshold=threshold, maxIters=maxiters)20 return M21 def __call__(self, 22 kp_frame: List[cv2.KeyPoint],23 kp_marker: List[cv2.KeyPoint],24 des_frame: List[cv2.DMatch],25 des_marker: List[cv2.DMatch],26 n_best: int = 100,27 threshold: float = 5.0) -> Array[float]:28 29 if isinstance(self.matcher, cv2.FlannBasedMatcher):30 self.matches = self.matcher.knnMatch(des_marker, des_frame, k=self.neighbours)31 else:32 self.matches = self.matcher.match(des_marker, des_frame)33 self.matches = sorted(self.matches, key=lambda x: x.distance)[:n_best]...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Airtest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful