How to use _get_key_points method in Airtest

Best Python code snippet using Airtest

keypointMatching.py

Source:keypointMatching.py Github

copy

Full Screen

# keypointMatching.py — reformatted from the garbled single-line extraction above.

def find_sift(img_source, img_search, ratio=FILTER_RATIO):
    """SIFT-based image matching; returns a similarity confidence in [0, 1].

    NOTE(review): the snippet was truncated before this ``def`` line; the
    signature is reconstructed by symmetry with ``find_surf`` below — confirm
    against the original file.
    """
    # Validate the input images (must be non-empty).
    if not check_image_valid(img_source, img_search):
        raise Exception(img_source, img_search, "空图像")
    # Compute the keypoint sets and the matched feature pairs.
    kp1, kp2, matches = _get_key_points(img_source, img_search, ratio)
    print(len(kp1), len(kp2), len(matches))
    # Matched-keypoint count and the per-pair match mask.
    (matchNum, matchesMask) = getMatchNum(matches, ratio)
    print(matchNum, len(matchesMask))
    # Confidence = good matches / total candidate pairs.
    matcheRatio = matchNum / len(matchesMask)
    if 0 <= matcheRatio <= 1:
        return matcheRatio
    raise Exception("SIFT Score Error", matcheRatio)


# ---------------------------surf--------------------------#
def _init_surf():
    """Make sure that there is a SURF module in OpenCV; return a detector."""
    if cv2.__version__.startswith("3."):
        # OpenCV 3.x: SURF lives in the contrib (xfeatures2d) module,
        # which must be compiled separately.
        try:
            surf = cv2.xfeatures2d.SURF_create(hessianThreshold=400)
        # BUG FIX: the original wrote ``except surf:`` — ``surf`` is unbound
        # when SURF_create fails, so that line itself raised NameError instead
        # of reaching the intended error message. Catch the exceptions OpenCV
        # actually raises (missing xfeatures2d -> AttributeError; disabled
        # non-free algorithms -> cv2.error).
        except (AttributeError, cv2.error):
            print("to use SURF, you should build contrib with opencv3.0")
            raise Exception(
                "There is no SURF module in your OpenCV environment !")
    else:
        # OpenCV 2.x ships SURF in the main module.
        # NOTE(review): a different hessianThreshold (100 vs 400) is used
        # here — looks intentional in the original, but confirm.
        surf = cv2.SURF(hessianThreshold=100)
    return surf


def find_surf(img_source, img_search, ratio=FILTER_RATIO):
    """SURF-based image matching; returns a similarity confidence in [0, 1].

    :param img_source: source image
    :param img_search: image to compare against
    :param ratio: filter ratio for good feature points
    :return: similarity confidence
    :raises Exception: on empty images, too few feature points,
        or an out-of-range score
    """
    # Validate the input images (must be non-empty).
    if not check_image_valid(img_source, img_search):
        raise Exception(img_source, img_search, "空图像")
    # Detect keypoints and descriptors in both images.
    surf = _init_surf()
    kp1, des1 = surf.detectAndCompute(img_source, None)
    kp2, des2 = surf.detectAndCompute(img_search, None)
    # knnMatch with k=2 requires at least 2 features in each image.
    if len(kp1) < 2 or len(kp2) < 2:
        raise Exception("Not enough feature points in input images !")
    # k=2: keep the two best candidate matches for every feature point.
    matches = FLANN.knnMatch(des1, des2, k=2)
    print(len(matches))
    # Matched-keypoint count and the per-pair match mask.
    (matchNum, matchesMask) = getMatchNum(matches, ratio)
    print(matchNum, len(matchesMask))
    # Confidence = good matches / total candidate pairs.
    matcheRatio = matchNum / len(matchesMask)
    if 0 <= matcheRatio <= 1:
        return matcheRatio
    raise Exception("SURF Score Error", matcheRatio)


def _get_key_points(im_source, im_search, ratio):
    """Compute all feature points of both images and match them pairwise.

    NOTE: ``ratio`` is accepted for interface symmetry with the callers
    but is not used inside this function.
    """
    # Initialize the SIFT detector.
    sift = _init_sift()
    # Keypoints/descriptors for the template and the source image.
    kp_sch, des_sch = sift.detectAndCompute(im_search, None)
    kp_src, des_src = sift.detectAndCompute(im_source, None)
    # knnMatch with k=2 requires at least 2 features in each image.
    if len(kp_sch) < 2 or len(kp_src) < 2:
        raise Exception("Not enough feature points in input images !")
    # k=2: keep the two best candidate matches for every feature point.
    matches = FLANN.knnMatch(des_sch, des_src, k=2)
    return kp_sch, kp_src, matches

Full Screen

Full Screen

sift.py

Source:sift.py Github

copy

Full Screen

...6 # 第一步:检验图像是否正常:7 if not check_image_valid(im_source, im_search):8 return None9 # 第二步:获取特征点集并匹配出特征点对: 返回值 good, pypts, kp_sch, kp_src10 kp_sch, kp_src, good = _get_key_points(im_source, im_search, good_ratio)11 # 第三步:根据匹配点对(good),提取出来识别区域:12 if len(good) == 0:13 # 匹配点对为0,无法提取识别区域:14 return None15 elif len(good) == 1:16 # 匹配点对为1,可信度赋予设定值,并直接返回:17 return _handle_one_good_points(kp_src, good, threshold) if ONE_POINT_CONFI >= threshold else None18 elif len(good) == 2:19 # 匹配点对为2,根据点对求出目标区域,据此算出可信度:20 origin_result = _handle_two_good_points(im_source, im_search, kp_src, kp_sch, good)21 if isinstance(origin_result, dict):22 return origin_result if ONE_POINT_CONFI >= threshold else None23 else:24 middle_point, pypts, w_h_range = _handle_two_good_points(im_source, im_search, kp_src, kp_sch, good)25 elif len(good) == 3:26 # 匹配点对为3,取出点对,求出目标区域,据此算出可信度:27 origin_result = _handle_three_good_points(im_source, im_search, kp_src, kp_sch, good)28 if isinstance(origin_result, dict):29 return origin_result if ONE_POINT_CONFI >= threshold else None30 else:31 middle_point, pypts, w_h_range = _handle_three_good_points(im_source, im_search, kp_src, kp_sch, good)32 else:33 # 匹配点对 >= 4个,使用单矩阵映射求出目标区域,据此算出可信度:34 middle_point, pypts, w_h_range = _many_good_pts(im_source, im_search, kp_sch, kp_src, good)35 # 第四步:根据识别区域,求出结果可信度,并将结果进行返回:36 # 对识别结果进行合理性校验: 小于5个像素的,或者缩放超过5倍的,一律视为不合法直接raise.37 _target_error_check(w_h_range)38 # 将截图和识别结果缩放到大小一致,准备计算可信度39 x_min, x_max, y_min, y_max, w, h = w_h_range40 target_img = im_source[y_min:y_max, x_min:x_max]41 resize_img = cv2.resize(target_img, (w, h))42 confidence = _cal_sift_confidence(im_search, resize_img, rgb=rgb)43 best_match = generate_result(middle_point, pypts, confidence)44 print("[aircv][sift] threshold=%s, result=%s" % (threshold, best_match))45 return best_match if confidence >= threshold else None46def _get_key_points(im_source, im_search, good_ratio):47 """根据传入图像,计算图像所有的特征点,并得到匹配特征点对."""48 # 准备工作: 初始化sift算子49 sift = _init_sift()50 # 
第一步:获取特征点集,并匹配出特征点对: 返回值 good, pypts, kp_sch, kp_src51 kp_sch, des_sch = sift.detectAndCompute(im_search, None)52 kp_src, des_src = sift.detectAndCompute(im_source, None)53 # When apply knnmatch , make sure that number of features in both test and54 # query image is greater than or equal to number of nearest neighbors in knn match.55 if len(kp_sch) < 2 or len(kp_src) < 2:56 raise NoSiftMatchPointError("Not enough feature points in input images !")57 # 匹配两个图片中的特征点集,k=2表示每个特征点取出2个最匹配的对应点:58 matches = FLANN.knnMatch(des_sch, des_src, k=2)59 good = []60 # good为特征点初选结果,剔除掉前两名匹配太接近的特征点,不是独特优秀的特征点直接筛除(多目标识别情况直接不适用)...

Full Screen

Full Screen

DepthMapImage.py

Source:DepthMapImage.py Github

copy

Full Screen

# DepthMapImage.py — reformatted from the garbled single-line extraction above.

class DepthMapImage:
    """Build a rectified SGBM disparity map from a stereo image pair."""

    def __init__(self, img1, img2):
        # Work in grayscale: SIFT and StereoSGBM both operate on
        # single-channel images.
        self.img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
        self.img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

    def _get_key_points(self):
        """Detect SIFT keypoints and descriptors for both images.

        :return: ((kp1, des1), (kp2, des2)) as produced by detectAndCompute
        """
        sift = cv.SIFT_create()
        points1 = sift.detectAndCompute(self.img1, None)
        points2 = sift.detectAndCompute(self.img2, None)
        return points1, points2

    def _get_good_matches(self, points):
        """FLANN-match the descriptors and keep ratio-filtered point pairs.

        :param points: ((kp1, des1), (kp2, des2)) from _get_key_points
        :return: (pts1, pts2) — matched point coordinates in each image
        """
        kp1, des1 = points[0]
        kp2, des2 = points[1]
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        # CLEANUP: the original also built a ``matchesMask``/``good`` list that
        # was never returned or used — removed as dead locals.
        pts1 = []
        pts2 = []
        # Lowe's ratio test: keep a match only when the best candidate is
        # clearly better than the second best.
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)
        return pts1, pts2

    def _stereo_ratification(self, data):
        """Rectify the pair from matched points and compute an SGBM disparity map.

        :param data: (pts1, pts2) matched point coordinates
        :return: uint8 disparity map normalized to [0, 255]
        """
        pts1, pts2 = data
        pts1 = np.int32(pts1)
        pts2 = np.int32(pts2)

        # Estimate epipolar geometry; keep only RANSAC inliers.
        fundamental_matrix, inliers = cv.findFundamentalMat(pts1, pts2, cv.FM_RANSAC)
        pts1 = pts1[inliers.ravel() == 1]
        pts2 = pts2[inliers.ravel() == 1]
        h1, w1 = self.img1.shape
        h2, w2 = self.img2.shape
        # Uncalibrated rectification: homographies H1/H2 align epipolar lines.
        _, H1, H2 = cv.stereoRectifyUncalibrated(
            np.float32(pts1), np.float32(pts2), fundamental_matrix, imgSize=(w1, h1)
        )
        img1_rectified = cv.warpPerspective(self.img1, H1, (w1, h1))
        img2_rectified = cv.warpPerspective(self.img2, H2, (w2, h2))
        # Semi-global block matching parameters.
        block_size = 11
        min_disp = -128
        max_disp = 128
        num_disp = max_disp - min_disp
        uniquenessRatio = 5
        speckleWindowSize = 200
        speckleRange = 2
        disp12MaxDiff = 0
        stereo = cv.StereoSGBM_create(
            minDisparity=min_disp,
            numDisparities=num_disp,
            blockSize=block_size,
            uniquenessRatio=uniquenessRatio,
            speckleWindowSize=speckleWindowSize,
            speckleRange=speckleRange,
            disp12MaxDiff=disp12MaxDiff,
            # P1/P2 smoothness terms; the factor 1 is the channel count
            # (grayscale input).
            P1=8 * 1 * block_size * block_size,
            P2=32 * 1 * block_size * block_size,
        )

        disparity_SGBM = stereo.compute(img1_rectified, img2_rectified)
        # Normalize to the displayable 0..255 range.
        disparity_SGBM = cv.normalize(disparity_SGBM, disparity_SGBM, alpha=255,
                                      beta=0, norm_type=cv.NORM_MINMAX)
        disparity_SGBM = np.uint8(disparity_SGBM)
        return disparity_SGBM

    def get_ratification_disparity_map(self):
        """Full pipeline: keypoints -> good matches -> rectified disparity map."""
        points = self._get_key_points()
        matchs = self._get_good_matches(points)
        # NOTE(review): the snippet was truncated after the line above;
        # presumably the matched pairs feed the rectification/disparity
        # step — confirm against the original file.
        return self._stereo_ratification(matchs)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Airtest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful