How to use the get_keypoints_and_descriptors method in Airtest

The Python code snippets below show how get_keypoints_and_descriptors is used, first in a standalone SIFT script and then in Airtest's own keypoint-matching classes.
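In Airtest, get_keypoints_and_descriptors is a method on the keypoint-matching classes (see keypoint_matching_contrib.py below); it wraps the underlying OpenCV detector's detectAndCompute call and returns the keypoints and descriptors of one image. The following is a minimal sketch of calling it directly: the module path airtest.aircv.keypoint_matching_contrib, the SIFTMatching(im_search, im_source) constructor arguments, and the image file names are assumptions for illustration, not guaranteed API details.

import cv2
# Assumed module path; SIFTMatching appears in the keypoint_matching_contrib.py snippet below.
from airtest.aircv.keypoint_matching_contrib import SIFTMatching

# Hypothetical input images: a small template and the screenshot to search in.
im_search = cv2.imread("template.png")
im_source = cv2.imread("screenshot.png")

# Constructor arguments are an assumption based on typical Airtest usage.
matcher = SIFTMatching(im_search, im_source)

# get_keypoints_and_descriptors wraps detector.detectAndCompute(image, None).
kp_sch, des_sch = matcher.get_keypoints_and_descriptors(im_search)
kp_src, des_src = matcher.get_keypoints_and_descriptors(im_source)

# match_keypoints runs knnMatch with k=2; keep only pairs that pass Lowe's ratio test.
matches = matcher.match_keypoints(des_sch, des_src)
good = [pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]
print("keypoints: %d vs %d, good matches: %d" % (len(kp_sch), len(kp_src), len(good)))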

practica3.py

Source: practica3.py (GitHub)



import cv2 as cv
import pickle
import numpy as np
import random
from auxFunc import *
from matplotlib import pyplot as plt
import os
import collections
from collections import Counter, defaultdict

black = [0, 0, 0]
white = [255, 255, 255]
font = cv.FONT_HERSHEY_PLAIN
np.random.seed(1024)
random.seed(1024)

def read_image(filename, flagColor):
    if flagColor == True:
        return cv.imread(filename, cv.IMREAD_COLOR)
    else:
        return cv.imread(filename, cv.IMREAD_GRAYSCALE)

def display_multiple_images(vim, title=None, col=3, color=white):
    width_rows = []
    height_rows = []
    for i in range(0, len(vim)):
        vim[i] = cv.copyMakeBorder(vim[i], 20, 0, 4, 4, cv.BORDER_CONSTANT, value=color)
        if title != None:
            cv.putText(vim[i], title[i], (10, 15), font, 1, (0, 0, 0), 1, 0)
    for i in range(0, len(vim), col):
        width_rows.append(sum([im.shape[1] for im in vim[i:min(i + col, len(vim))]]))
        height_rows.append(max([im.shape[0] for im in vim[i:min(i + col, len(vim))]]))
    # total width of the image map
    width = max(width_rows)
    # total height of the image map
    height = sum(height_rows)
    # matrix that will hold the image map
    image_map = np.zeros((height, width, 3), np.uint8)
    # fill in the map
    inicio_fil = 0
    for i in range(0, len(vim), col):
        inicio_col = 0
        # fill in one row of images
        for k in range(i, min(i + col, len(vim))):
            actual_image = vim[k]
            if len(actual_image.shape) < 3:
                actual_image = cv.cvtColor(actual_image, cv.COLOR_GRAY2RGB)
            for r in range(actual_image.shape[0]):
                for c in range(actual_image.shape[1]):  # renamed so the "col" parameter is not shadowed
                    image_map[inicio_fil + r][inicio_col + c] = actual_image[r][c]
            inicio_col += actual_image.shape[1]
        inicio_fil += height_rows[i // col]
    # visualization
    display_image(image_map)

def display_image(im):
    cv.imshow('Image', im)
    k = cv.waitKey(0)
    cv.destroyAllWindows()

############################ EXERCISE 1 ############################
def get_keyPoints_and_descriptors(img, mask=None):
    # SIFT moved into the main module in OpenCV 4.4; fall back to xfeatures2d on older builds.
    try:
        sift = cv.SIFT_create()
    except AttributeError:
        sift = cv.xfeatures2d.SIFT_create()
    return sift.detectAndCompute(img, mask)

def getMatchesKNN(desc_1, desc_2, k_=2):
    bf = cv.BFMatcher()
    matches = bf.knnMatch(desc_1, desc_2, k=k_)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])
    return good

# Compute the matches between two images, restricting the first one to a masked region.
def draw_matches_mask(img1, img2, points):
    np.set_printoptions(threshold=np.inf)  # the original used np.nan, which newer NumPy rejects
    # Mask
    mask = np.zeros(shape=(np.shape(img1)[0], np.shape(img1)[1]))
    cv.fillConvexPoly(mask, np.array(points, dtype=np.int32), color=1)
    # Descriptors
    kp1, desc1 = get_keyPoints_and_descriptors(img1, mask=np.array(mask, dtype=np.uint8))
    kp2, desc2 = get_keyPoints_and_descriptors(img2)
    # Matches
    matches = getMatchesKNN(desc1, desc2)
    # Draw the matches
    img_matches_2 = cv.drawMatchesKnn(img1, kp1, img2, kp2, matches1to2=matches, outImg=None, flags=2)
    display_image(img_matches_2)

print("EXERCISE 1")
# test 1
print("Test 1")
img1 = cv.imread("imagenes/64.png")
img2 = cv.imread("imagenes/65.png")
points = [(470, 125), (620, 125), (620, 330), (470, 330)]
draw_matches_mask(img1, img2, points)
# test 2
print("Test 2")
img3 = cv.imread("imagenes/57.png")
img4 = cv.imread("imagenes/58.png")
points_2 = [(10, 90), (120, 90), (120, 220), (10, 220)]
draw_matches_mask(img3, img4, points_2)
# test 3
print("Test 3")
img5 = cv.imread("imagenes/54.png")
img6 = cv.imread("imagenes/55.png")
points_3 = [(40, 90), (250, 90), (250, 240), (40, 240)]
draw_matches_mask(img5, img6, points_3)

############################ EXERCISE 2 ############################
print("EXERCISE 2")
# Name of the dictionary (visual vocabulary) file
dictionary_name = "imagenes/kmeanscenters2000.pkl"
# Name of the file containing the descriptors and patches
descriptors_and_patches_name = "imagenes/descriptorsAndpatches2000.pkl"
descriptors, patches = loadAux(descriptors_and_patches_name, True)
accuracy, labels, dictionary = loadDictionary(dictionary_name)

def get_histogram(img_name, dictionary, dictionary_norm):
    # Read the image
    img = cv.imread("imagenes/" + img_name)
    # Compute its descriptors
    kp, desc = get_keyPoints_and_descriptors(img)
    desc_norm = np.apply_along_axis(np.linalg.norm, 1, desc)
    # Compute the similarity to every visual word and normalize
    similarities = np.dot(dictionary, desc.T)
    similarities_norm = np.divide(similarities, desc_norm * dictionary_norm[:, None])
    histogram = Counter(np.argmax(similarities_norm, axis=0))
    return histogram

def get_inverted_file_index(dictionary_name):
    accuracy, labels, dictionary = loadDictionary(dictionary_name)
    dictionary_norm = np.apply_along_axis(np.linalg.norm, 1, dictionary)
    histograms = []
    inverted_file = collections.defaultdict(list)
    for img in range(441):
        img_name = str(img) + ".png"
        histogram = get_histogram(img_name, dictionary, dictionary_norm)
        histograms.append(histogram)
        for i in histogram:
            inverted_file[i].append(img)
    return dict(inverted_file), histograms

def retrieval_images(image_name, inverted_file, histograms):
    histogram = histograms[int(os.path.splitext(image_name)[0])]
    histogram_image = [histogram[k] for k in range(2000)]
    images = []
    for i in histogram:
        for img in inverted_file[i]:
            images.append(img)
    # Candidate images together with their number of occurrences
    images_names, _ = np.unique(images, return_counts=True)
    values = []
    for i in images_names:
        histogram_images_i = [histograms[int(i)][k] for k in range(2000)]
        similarity = np.dot(histogram_images_i, histogram_image)
        n1 = np.linalg.norm(histogram_images_i)
        n2 = np.linalg.norm(histogram_image)
        similarity = similarity / (n1 * n2)
        values.append(similarity)
    values = np.array(values)
    index_sort = np.argsort(-values)
    images_names_sorted = images_names[index_sort]
    display_image(cv.imread("imagenes/" + image_name))
    for i in images_names_sorted[0:5]:
        display_image(cv.imread("imagenes/" + str(i) + ".png"))

print("Building the inverted index...")
inverted_file, histograms = get_inverted_file_index(dictionary_name)
print("Done.")
print("Query image 1")
retrieval_images("353.png", inverted_file, histograms)
print("Query image 2")
retrieval_images("4.png", inverted_file, histograms)
print("Query image 3")
...


keypoint_matching_contrib.py

Source: keypoint_matching_contrib.py (GitHub)



...
        self.star_detector = cv2.FeatureDetector_create("STAR")
        self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1 cv2.NORM_L2 cv2.NORM_HAMMING (not usable)

    def get_keypoints_and_descriptors(self, image):
        """Get the image's keypoints and descriptors."""
        # find the keypoints with STAR
        kp = self.star_detector.detect(image, None)
        # compute the descriptors with BRIEF
        keypoints, descriptors = self.brief_extractor.compute(image, kp)
        return keypoints, descriptors

    def match_keypoints(self, des_sch, des_src):
        """Match the descriptors of the two images."""
        # k=2 returns the two best matches for every keypoint:
        return self.matcher.knnMatch(des_sch, des_src, k=2)


class SIFTMatching(KeypointMatching):
    """SIFT Matching."""

    METHOD_NAME = "SIFT"  # method name used in the logs
    # SIFT keypoint-matching parameters:
    FLANN_INDEX_KDTREE = 0

    def init_detector(self):
        """Init keypoint detector object."""
        if check_cv_version_is_new():
            try:
                # opencv3 >= 3.4.12 or opencv4 >= 4.5.0: SIFT is in the main repository
                self.detector = cv2.SIFT_create(edgeThreshold=10)
            except AttributeError:
                try:
                    self.detector = cv2.xfeatures2d.SIFT_create(edgeThreshold=10)
                except:
                    raise NoModuleError(
                        "There is no %s module in your OpenCV environment, need contrib module!" % self.METHOD_NAME)
        else:
            # OpenCV 2.x
            self.detector = cv2.SIFT(edgeThreshold=10)
        # create FlannMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50))

    def get_keypoints_and_descriptors(self, image):
        """Get the image's keypoints and descriptors."""
        keypoints, descriptors = self.detector.detectAndCompute(image, None)
        return keypoints, descriptors

    def match_keypoints(self, des_sch, des_src):
        """Match the descriptors of the two images."""
        # k=2 returns the two best matches for every keypoint:
        return self.matcher.knnMatch(des_sch, des_src, k=2)


class SURFMatching(KeypointMatching):
    """SURF Matching."""

    METHOD_NAME = "SURF"  # method name used in the logs
    # orientation invariance: 0 = compute orientation, 1 = upright (skip it)
    UPRIGHT = 0
    # Hessian threshold for the SURF detector
    HESSIAN_THRESHOLD = 400
    # SURF keypoint-matching parameters:
    FLANN_INDEX_KDTREE = 0

    def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SURF is in the contrib module, which has to be compiled separately.
            try:
                self.detector = cv2.xfeatures2d.SURF_create(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)
            except:
                raise NoModuleError("There is no %s module in your OpenCV environment, need contrib module!" % self.METHOD_NAME)
        else:
            # OpenCV 2.x
            self.detector = cv2.SURF(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)
        # create FlannMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50))

    def get_keypoints_and_descriptors(self, image):
        """Get the image's keypoints and descriptors."""
        keypoints, descriptors = self.detector.detectAndCompute(image, None)
        return keypoints, descriptors

    def match_keypoints(self, des_sch, des_src):
        """Match the descriptors of the two images."""
        # k=2 returns the two best matches for every keypoint:
...
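After match_keypoints returns the k=2 neighbour pairs, the usual next step is Lowe's ratio test followed by a RANSAC homography to locate the template inside the source image. The sketch below uses plain OpenCV rather than Airtest's internal code; the 0.75 ratio and the 5.0 reprojection threshold are illustrative values, not the constants Airtest uses.

import cv2
import numpy as np

def locate_template(im_search, im_source, ratio=0.75):
    # Detect and describe keypoints with SIFT (main-module API, OpenCV >= 4.4).
    sift = cv2.SIFT_create()
    kp_sch, des_sch = sift.detectAndCompute(im_search, None)
    kp_src, des_src = sift.detectAndCompute(im_source, None)

    # k=2 nearest neighbours per descriptor, then Lowe's ratio test.
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(des_sch, des_src, k=2)
    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance]
    if len(good) < 4:
        return None  # not enough matches to estimate a homography

    # Estimate the homography that maps template points into the source image.
    src_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if H is None:
        return None

    # Project the template's corners to get its outline in the source image.
    h, w = im_search.shape[:2]
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, H)

A confidence score similar to the one Airtest reports could then be derived from the inlier mask, but that is outside this sketch.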


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automated test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Airtest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 minutes of automation testing for free!

