How to use the img_debug method in Pyscreenshot

Best Python code snippets using pyscreenshot_python

deskew.py

Source: deskew.py (GitHub)


import cv2
import imutils
import numpy as np
import mahotas
from field_test.smart_test.license_plate_recognition.kmeans import kMeans
from field_test.smart_test.license_plate_recognition.find_color import car_plate_color


def distance(pt1, pt2):
    return np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)


def four_perspective(img, pt1, pt2, pt3, pt4):
    w = (distance(pt1, pt2) + distance(pt3, pt4)) / 2
    h = (distance(pt2, pt3) + distance(pt1, pt4)) / 2
    ori = [[pt1[0], pt1[1]], [pt2[0], pt2[1]],
           [pt3[0], pt3[1]], [pt4[0], pt4[1]]]
    ori = np.array(ori).astype(np.float32)
    dst = [[0, 0], [w, 0],
           [w, h], [0, h]]
    dst = np.array(dst).astype(np.float32)
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(ori, dst)
    img = cv2.warpPerspective(img, M, (int(w), int(h)), flags=cv2.INTER_LANCZOS4,
                              borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    return img


def deskew(ori_img, iteration=36, remove_bg=False):
    # preprocess
    img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 3)
    h, w = img.shape[:2]
    val, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    img = cv2.GaussianBlur(img, (7, 7), 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=iteration,
                           borderType=cv2.BORDER_CONSTANT, borderValue=0)
    _, img = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # rotation fix
    coords = np.column_stack(np.where(img > 0))  # get all non-zero pixel coords
    anchor, size, angle = cv2.minAreaRect(coords)  # bound them with a rotated rect
    # the angle returned by minAreaRect is confusing; see this good answer:
    # https://stackoverflow.com/questions/15956124/minarearect-angles-unsure-about-the-angle-returned
    if angle < -45:
        angle = -(90 + angle)
        center = (anchor[0] + size[1] / 2, anchor[1] + size[0] / 2)
    else:
        angle = -angle
        center = (anchor[0] + size[0] / 2, anchor[1] + size[1] / 2)
    M = cv2.getRotationMatrix2D(center, angle, 1)
    img_padded = cv2.copyMakeBorder(ori_img, int(0.2 * h), int(0.2 * h), int(0.2 * w), int(0.2 * w),
                                    cv2.BORDER_CONSTANT, value=0)
    h_padded, w_padded = img_padded.shape[:2]
    warp = cv2.warpAffine(img_padded, M, (w_padded, h_padded), flags=cv2.INTER_LANCZOS4,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    if remove_bg:
        mask_padded = cv2.copyMakeBorder(img, int(0.2 * h), int(0.2 * h), int(0.2 * w), int(0.2 * w),
                                         cv2.BORDER_CONSTANT, value=0)
        mask_h_padded, mask_w_padded = mask_padded.shape[:2]
        mask_warp = cv2.warpAffine(mask_padded, M, (mask_w_padded, mask_h_padded), flags=cv2.INTER_LANCZOS4,
                                   borderMode=cv2.BORDER_CONSTANT, borderValue=0)
        coords = np.column_stack(np.where(mask_warp > 0))  # get all non-zero pixel coords
        anchor, size, angle = cv2.minAreaRect(coords)  # bound them with a rotated rect
        if angle < -45:
            warp = warp[int(anchor[0] - size[1] / 2):int(anchor[0] + size[1] / 2),
                        int(anchor[1] - size[0] / 2):int(anchor[1] + size[0] / 2)]
        else:
            warp = warp[int(anchor[0] - size[0] / 2):int(anchor[0] + size[0] / 2),
                        int(anchor[1] - size[1] / 2):int(anchor[1] + size[1] / 2)]
    return warp


def detrap(ori_img):
    # preprocess
    img_debug = ori_img
    # try to extract the car license plate using color matching
    img_color = car_plate_color(img_debug, iteration=12)
    try:
        img_debug = deskew(img_color, remove_bg=True)
        img_debug = imutils.resize(img_debug, width=320, inter=cv2.INTER_LANCZOS4)
        cv2.imshow('img_debug_color', img_debug)
        cv2.waitKey(0)
    except Exception:
        return ori_img
    img = cv2.cvtColor(img_debug, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 3)
    h, w = img.shape[:2]
    # find the best Otsu threshold while ignoring black pixels
    if len(img[img > 0]) > 0:
        T = mahotas.otsu(img[img > 0])
    else:
        T = 0
    print('otsu value: {}'.format(T))
    # val, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    img[img >= T] = 255
    img[img < T] = 0
    img = cv2.GaussianBlur(img, (13, 13), 0)
    ori_h, ori_w = img.shape[:2]
    img = cv2.copyMakeBorder(img, int(0.2 * ori_h), int(0.2 * ori_h), int(0.2 * ori_w), int(0.2 * ori_w),
                             cv2.BORDER_CONSTANT, value=0)
    img_debug = cv2.copyMakeBorder(img_debug, int(0.2 * ori_h), int(0.2 * ori_h), int(0.2 * ori_w), int(0.2 * ori_w),
                                   cv2.BORDER_CONSTANT, value=0)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 3))
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=32, borderType=cv2.BORDER_CONSTANT, borderValue=0)
    val, img = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY)
    h, w = img.shape[:2]
    # img = img[int(0.2 * ori_h):h - int(0.2 * ori_h), int(0.2 * ori_w): w - int(0.2 * ori_w)]
    cv2.imshow('img_debug1', img)
    cv2.waitKey(0)
    # remove small contours, keeping only the largest one
    # img = cv2.Canny(img, 1, 1)
    cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(cnts) > 0:
        c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
        img_cnts = np.zeros_like(img, dtype=np.uint8)
        cv2.drawContours(img_cnts, [c], 0, (255, 255, 255), -1)
        cv2.imshow('img_cnts', img_cnts)
        cv2.waitKey(0)
        img = img_cnts
    img_edge = cv2.Canny(img, 1, 1)
    img = cv2.bitwise_and(np.dstack([img, img, img]), img_debug)
    h, w = img.shape[:2]
    accum = 10
    lines = cv2.HoughLinesP(img_edge, 1, np.pi / 180, accum, minLineLength=h // 4, maxLineGap=h)
    # group lines with similar slopes together
    if lines is not None and len(lines) >= 4:
        print('found {} lines'.format(len(lines)))
        lines1 = lines[:, 0, :]  # extract to 2d
        k_set = []
        for i, (x1, y1, x2, y2) in enumerate(lines1[:]):
            k_set.append(slope(x1, x2, y1, y2, theta=True))
            # cv2.line(img, (x1, y1), (x2, y2), (128, 128, 128), 5)
        k_set = np.asarray(k_set)
        k_set = k_set.reshape((len(k_set), 1))
        while True:
            # make sure there is no nan/inf in k
            k_center, k_cluster = kMeans(k_set, 2)
            if k_center is None or k_cluster is None:
                k_center = [0, np.pi / 2]
            k_max = np.max(k_center)
            if not np.isnan(k_max):
                k_center = np.sort(k_center, axis=0)
                break
        h_idx = 0 if np.abs(k_center[0]) < np.abs(k_center[1]) else 1
        v_idx = int(not h_idx)
        theta_h = np.rad2deg(k_center[h_idx])
        theta_v = np.rad2deg(k_center[v_idx]) - theta_h
        if theta_v > 45:
            theta_v = 90 - theta_v
        # fix rotation
        img = imutils.rotate_bound(img, angle=-theta_h)
        cv2.imshow('img_debug_rotation', img)
        cv2.waitKey(0)
        # fix perspective
        ori = [[0, 0], [w - h * np.tan(np.deg2rad(theta_v)), 0],
               [w, h], [h * np.tan(np.deg2rad(theta_v)), h]]
        ori = np.array(ori).astype(np.float32)
        dst = [[0, 0], [w, 0],
               [w, h], [0, h]]
        dst = np.array(dst).astype(np.float32)
        # compute the perspective transform matrix and then apply it
        M = cv2.getPerspectiveTransform(ori, dst)
        old_h, old_w = img.shape[:2]
        img = cv2.copyMakeBorder(img, int(0.2 * old_h), int(0.2 * old_h), int(0.2 * old_w), int(0.2 * old_w),
                                 cv2.BORDER_CONSTANT, value=0)
        new_h, new_w = img.shape[:2]
        img = cv2.warpPerspective(img, M, (new_w, new_h), borderMode=cv2.BORDER_CONSTANT, borderValue=0)
        img = deskew(img, remove_bg=True)
    return img


def slope(x1, x2, y1, y2, theta=True):
    if x1 != x2:
        result = (y2 - y1) / (x2 - x1)
    else:
        result = np.inf
    if theta:
        return np.arctan(result)
    else:
        return result


def intercept(x1, x2, y1, y2):
    if x1 != x2:
        return y1 - ((y2 - y1) / (x2 - x1)) * x1
    else:
        return np.inf


def line_intersection(k1, b1, k2, b2):
    # l1: y = k1 * x + b1
    # l2: y = k2 * x + b2
    if k1 != k2:
        x = -(b2 - b1) / (k2 - k1)
        y = k1 * x + b1
        return x, y
    else:
        return np.inf, np.inf


if __name__ == '__main__':
    ori_img = cv2.imread('test/img_debug_color_screenshot_02.08.2018.png')
    # ori_img = cv2.imread('f:/temp/test.jpg')
    # warp = deskew(ori_img, remove_bg=True)
    trap = detrap(ori_img)
    cv2.imshow('img_p', trap)
    cv2.waitKey(0)
    # cv2.imshow('warp', warp)
    # cv2.waitKey(0)
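
The detrap() pipeline above reads its test image from disk, but the same cv2.imshow debug flow applies to live screen captures, which is where pyscreenshot comes in. A minimal sketch, assuming only that pyscreenshot and OpenCV are installed: grab() returns a PIL image in RGB order, which has to be converted to OpenCV's BGR layout before being fed to a function such as deskew():

import numpy as np
import cv2
import pyscreenshot as ImageGrab

# Grab the full screen (pass bbox=(x1, y1, x2, y2) to grab a region instead).
pil_img = ImageGrab.grab()

# pyscreenshot returns a PIL image in RGB order; OpenCV expects BGR.
img_debug = np.array(pil_img)
img_debug = cv2.cvtColor(img_debug, cv2.COLOR_RGB2BGR)

cv2.imshow('img_debug', img_debug)
cv2.waitKey(0)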


run_picar.py

Source: run_picar.py (GitHub)


# This is the main code to run the pi car.
import time
import numpy as np
import cv2 as cv
import driver
import image_processing
import camera_capturer
import utils

DEBUG = False
PERIOD = 0  # the period of image capture, processing and signal sending
OFFSET = 371


def cruise_control(bias, k_p=1, k_i=0, k_d=1):
    """Controls the picar in cruise mode."""
    return 0


def cruise():
    """Tracks the black line.

    Acquires images from the front camera and uses them to do pure pursuit.
    Uses functions in driver.py to drive the pi car.
    There is a three-step process to reach the goal.
    Step 1. Employs the CameraCapturer class to acquire images from the
            front camera and rectify lens distortion.
    Step 2. Chooses the ROI and binarizes it, then uses a morphology method
            to get the target point.
    Step 3. According to the target point, applies the pure pursuit
            algorithm and uses functions in driver.py to drive the car.

    Args:
        None
    Returns:
        None
    """
    # Initialize CameraCapturer and driver
    cap = camera_capturer.CameraCapturer("front")
    d = driver.driver()
    d.setStatus(motor=0.4, servo=0, mode="speed")
    last_time = time.time()
    target = OFFSET  # -int(cap.width / 5)
    # Parameters of the PID controller
    kp = 1.2
    ki = 0
    kd = 0.1
    # Initialize errors to 0 for the PID controller
    error_p = 0
    error_i = 0
    error_d = 0
    error = 0
    servo = 0
    last_servo = 0
    last_angle = 0
    n = 0.2
    try:
        while True:
            this_time = time.time()
            if this_time - last_time > PERIOD:  # execute the code below every PERIOD seconds
                last_time = this_time
                # d.setStatus(motor=0, servo=n, mode="speed")
                # n = -n
                # continue
                # ----------------------------------------------------------- #
                # Image processing. Outputs a target_point.
                frame = cap.get_frame()
                start = time.time()
                skel, img_bin_rev = image_processing.image_process(frame)
                white_rate = \
                    np.size(img_bin_rev[img_bin_rev == 255]) / img_bin_rev.size
                if white_rate > 0.3:
                    print("stay", white_rate)
                    d.setStatus(servo=last_servo)
                    continue
                target_point, width, _, img_DEBUG, angle = \
                    choose_target_point(skel, target)
                end = time.time()
                if angle == 0:
                    angle = last_angle
                else:
                    # Update the PID errors
                    error_p = angle  # **(9/7)
                    error_i += error_p
                    error_d = error_p - error
                    error = error_p
                    # PID controller
                    servo = utils.constrain(- kp * error_p
                                            - ki * error_i
                                            - kd * error_d,
                                            1, -1)
                    d.setStatus(servo=servo)
                    last_servo = servo
                    # print("servo: ", servo, "error_p: ", error_p)
                # img_DEBUG[:, target] = 255
                # if DEBUG:
                #     # cv.imshow("frame", frame)
                #     cv.imshow("img_bin_rev", img_bin_rev)
                #     cv.imshow("img_DEBUG", img_DEBUG)
                #     cv.waitKey(300)
                # ----------------------------------------------------------- #
            else:
                # time.sleep(0.01)
                pass
    except KeyboardInterrupt:
        d.setStatus(servo=0, motor=0)


def choose_target_point(skel, target):
    """Selects a target point from the skeleton for pure pursuit.

    Draws an ellipse and ANDs it with the skeleton, then returns the
    intersection point with the least distance to the center of the ellipse.

    Args:
        skel: skeleton of the trajectory.
        target: x coordinate of the pursuit target.
    Returns:
        target_point: target point for pure pursuit.
    """
    width = skel.shape[1]
    height = skel.shape[0]
    img = np.zeros((height, width), dtype=np.uint8)
    ellipse_a = width // 2
    ellipse_b = height // 3
    ellipse = cv.ellipse(img,
                         center=(target, height),
                         axes=(ellipse_a, ellipse_b),
                         angle=0,
                         startAngle=180,
                         endAngle=360,
                         color=255,
                         thickness=1)
    img_points = np.bitwise_and(skel, ellipse)
    # [-2] keeps this compatible with both OpenCV 3 and 4 return signatures
    contours = cv.findContours(img_points,
                               mode=cv.RETR_EXTERNAL,
                               method=cv.CHAIN_APPROX_NONE)[-2]
    discrete_points = []
    img_DEBUG = np.zeros((height, width, 3), dtype=np.uint8)
    img_DEBUG[:, :, 0] = skel
    img_DEBUG[:, :, 1] = img_points
    img_DEBUG[:, :, 2] = ellipse
    cv.imwrite("img_DEBUG_2.jpg", img_DEBUG)
    # cv.waitKey(200)
    for contour in contours:
        if contour.size == 2:
            discrete_points.append(np.squeeze(contour))
    # discrete_points = sorted(discrete_points,
    #                          key=lambda x: (x[0] - width // 2)**2 +
    #                          (x[1] - height) ** 2)
    discrete_points = sorted(discrete_points,
                             key=lambda x: np.abs(x[0] - target))
    if len(discrete_points) != 0:
        px = discrete_points[0][0] - target
        angle = np.arctan2(px, ellipse_a)
        # angle = angle[0]
        print("angle:", angle)
        return discrete_points[0], width, height, img_DEBUG, angle
    else:
        return [0, 0], width, height, img_DEBUG, 0
    # return [target, 0], width, height, img_DEBUG


if __name__ == "__main__":
    ...  # the snippet is truncated here in the original source
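
driver, image_processing, camera_capturer and utils are project-local modules, so this snippet cannot run on its own. The one piece the PID step depends on directly is utils.constrain, which clamps the controller output to the servo's valid range. Judging from the call utils.constrain(value, 1, -1) above, a plausible sketch (the project's real implementation may differ) is:

def constrain(value, upper, lower):
    # Clamp value into [lower, upper]; note the caller passes the upper bound first.
    return max(lower, min(upper, value))

# A saturated PID output is clipped to the servo limits:
assert constrain(2.7, 1, -1) == 1
assert constrain(-0.3, 1, -1) == -0.3
assert constrain(-5.0, 1, -1) == -1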


screenpoint.py

Source: screenpoint.py (GitHub)


import numpy as np
import cv2
import logging

MIN_MATCH_COUNT = 20
FLANN_INDEX_KDTREE = 0
sift = cv2.xfeatures2d.SIFT_create()


def project(view, screen, point=None, debug=False):
    kp_screen, des_screen = sift.detectAndCompute(screen, None)
    kp_view, des_view = sift.detectAndCompute(view, None)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_screen, des_view, k=2)
    # Store all good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    if len(good) < MIN_MATCH_COUNT:
        logging.debug("ScreenPoint: Not enough matches.")
        return -1, -1
    screen_pts = np.float32([kp_screen[m.queryIdx].pt
                             for m in good]).reshape(-1, 1, 2)
    view_pts = np.float32([kp_view[m.trainIdx].pt
                           for m in good]).reshape(-1, 1, 2)
    if point is None:
        # Default to the center of the view image.
        h, w = view.shape[0], view.shape[1]
        h = (h - 1) * 0.5
        w = (w - 1) * 0.5
    else:
        w, h = point
    # print(h, w)
    M, mask = cv2.findHomography(view_pts, screen_pts, cv2.RANSAC, 5.0)
    pts = np.float32([[w, h]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    x, y = np.int32(dst[0][0])
    if debug:
        img_debug = draw_debug_(x, y, view, screen, M, mask, kp_screen,
                                kp_view, good)
        return x, y, img_debug
    else:
        return x, y


def draw_debug_(x, y, view, screen, M, mask, kp_screen, kp_view, good):
    matchesMask = mask.ravel().tolist()
    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        matchesMask=matchesMask,  # draw only inliers
        flags=2)
    img_debug = cv2.drawMatches(screen, kp_screen, view, kp_view, good, None,
                                **draw_params)
    # Get view centroid coordinates in img_debug space.
    cx = int(view.shape[1] * 0.5 + screen.shape[1])
    cy = int(view.shape[0] * 0.5)
    # Draw view outline.
    cv2.rectangle(img_debug, (screen.shape[1], 0),
                  (img_debug.shape[1] - 2, img_debug.shape[0] - 2),
                  (0, 0, 255), 2)
    # Draw connecting line.
    cv2.polylines(img_debug, [np.int32([(x, y), (cx, cy)])], True,
                  (100, 100, 255), 1, cv2.LINE_AA)
    # Draw query/match markers.
    cv2.drawMarker(img_debug, (cx, cy), (0, 0, 255), cv2.MARKER_CROSS, 30, 2)
    cv2.circle(img_debug, (x, y), 10, (0, 0, 255), -1)
    return img_debug  # implied by the call in project(); truncated in the original snippet
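
A usage sketch for project(); the image file names are hypothetical. Note that cv2.xfeatures2d.SIFT_create() requires the opencv-contrib-python package (recent OpenCV releases also expose SIFT as cv2.SIFT_create()), and that project() returns only (-1, -1) when there are too few matches, so the result has to be length-checked before unpacking the debug image:

import cv2

view = cv2.imread('view.png')      # hypothetical photo that contains the screen
screen = cv2.imread('screen.png')  # hypothetical reference screenshot

result = project(view, screen, debug=True)
if len(result) == 2:
    print('not enough SIFT matches')
else:
    x, y, img_debug = result
    print('view center maps to screen point', (x, y))
    cv2.imshow('img_debug', img_debug)
    cv2.waitKey(0)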


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Pyscreenshot automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

