How to use analyze_frame method in ATX

Best Python code snippet using ATX

test_hole_in_the_wall_model.py

Source:test_hole_in_the_wall_model.py Github

copy

Full Screen

...100 """101 test_model = HoleInTheCameraGame()102 # creates an all black image103 test_image = np.zeros([480, 640, 3])104 test_model.analyze_frame(test_image)105 assert len(test_model.joint_candidates) == 0106def test_analyze_frame_black_image_joint_subsets():107 """108 Tests that deep pose does not return any joint subsets109 for an empty black image.110 """111 test_model = HoleInTheCameraGame()112 test_image = np.zeros([480, 640, 3])113 test_model.analyze_frame(test_image)114 assert len(test_model.joint_subsets) == 0115def test_analyze_frame_white_image_joint_candidates():116 """117 Tests that deep pose does not find any joints118 for a white image.119 """120 test_model = HoleInTheCameraGame()121 # creates an all white image122 test_image = np.ones([480, 640, 3]) * 255123 test_model.analyze_frame(test_image)124 assert len(test_model.joint_candidates) == 0125def test_analyze_frame_white_image_joint_subsets():126 """127 Tests that deep pose does not return any joint subsets128 for a white image.129 """130 test_model = HoleInTheCameraGame()131 test_image = np.ones([480, 640, 3]) * 255132 test_model.analyze_frame(test_image)133 assert len(test_model.joint_subsets) == 0134def test_analyze_frame_no_legs_joint_candidates():135 """136 Tests that deep pose does not find extra joints137 for a mask containing only the upper body of a person.138 """139 test_model = HoleInTheCameraGame()140 # reads a stored image used to create game masks141 test_image = cv2.imread("images/poses/first_mask.png")142 test_model.analyze_frame(test_image)143 assert len(test_model.joint_candidates) == 14144def test_analyze_frame_no_legs_num_joint_subsets():145 """146 Test that deep pose only finds one person's joint subset147 when there's only one person in the image.148 """149 test_model = HoleInTheCameraGame()150 test_image = cv2.imread("images/poses/first_mask.png")151 test_model.analyze_frame(test_image)152 assert len(test_model.joint_subsets) == 1153def 
test_analyze_frame_no_legs_first_joint_subset():154 """155 Test that the only joint subset found by deep pose contains all156 expected information, including 17 joint positions and three157 values about the accuracy of the fit.158 """159 test_model = HoleInTheCameraGame()160 test_image = cv2.imread("images/poses/first_mask.png")161 test_model.analyze_frame(test_image)162 assert len(test_model.joint_subsets[0]) == 20163def test_analyze_frame_half_upper_joint_candidates():164 """165 Tests that deep pose does not find extra joints166 for a mask containing only the upper body of a person.167 """168 test_model = HoleInTheCameraGame()169 # splits the image in half to test behavior on a cut off person170 test_image = cv2.imread("images/poses/first_mask.png")[:, 0:325, :]171 test_model.analyze_frame(test_image)172 assert len(test_model.joint_candidates) == 7173def test_analyze_frame_half_upper_num_joint_subsets():174 """175 Tests that deep pose only found one joint subset for an image176 with only half a person.177 """178 test_model = HoleInTheCameraGame()179 test_image = cv2.imread("images/poses/first_mask.png")[:, :325, :]180 test_model.analyze_frame(test_image)181 assert len(test_model.joint_subsets) == 1182def test_analyze_frame_half_upper_first_joint_subset():183 """184 Test that the only joint subset found by deep pose contains all185 expected information, including 17 joint positions and three186 values about the accuracy of the fit.187 """188 test_model = HoleInTheCameraGame()189 test_image = cv2.imread("images/poses/first_mask.png")[:, :325, :]190 test_model.analyze_frame(test_image)191 assert len(test_model.joint_subsets[0]) == 20192def test_analyze_frame_other_half_upper_joint_candidates():193 """194 Tests that deep pose only found one joint subset for an image195 with only the other half a person.196 """197 test_model = HoleInTheCameraGame()198 test_image = cv2.imread("images/poses/first_mask.png")[:, 350:, :]199 test_model.analyze_frame(test_image)200 assert 
len(test_model.joint_candidates) == 7201def test_analyze_frame_other_half_upper_num_joint_subsets():202 """203 Tests that deep pose only found one joint subset for an image204 with only the other half a person.205 """206 test_model = HoleInTheCameraGame()207 test_image = cv2.imread("images/poses/first_mask.png")[:, 350:, :]208 test_model.analyze_frame(test_image)209 assert len(test_model.joint_subsets) == 1210def test_analyze_frame_other_half_upper_first_joint_subset():211 """212 Test that the only joint subset found by deep pose contains all213 expected information, including 17 joint positions and three214 values about the accuracy of the fit.215 """216 test_model = HoleInTheCameraGame()217 test_image = cv2.imread("images/poses/first_mask.png")[:, 350:, :]218 test_model.analyze_frame(test_image)219 assert len(test_model.joint_subsets[0]) == 20220def test_parse_for_joint_positions_black_image():221 """222 Test that when analyzing the deep pose result, there are no joint223 positions that can be found or parsed for in a black image.224 """225 test_model = HoleInTheCameraGame()226 test_image = np.zeros([480, 640, 3])227 test_model.analyze_frame(test_image)228 test_model.parse_for_joint_positions()229 assert test_model.joint_positions == {}230def test_parse_for_joint_positions_white_image():231 """232 Test that when analyzing the deep pose result, there are no joint233 positions that can be found or parsed for in a white image.234 """235 test_model = HoleInTheCameraGame()236 test_image = np.ones([480, 640, 3]) * 255237 test_model.analyze_frame(test_image)238 test_model.parse_for_joint_positions()239 assert test_model.joint_positions == {}240def test_parse_for_joint_positions_no_legs_joints_detected():241 """242 Test that when analyzing and parsing through the deep pose result, the243 joints that are expected to be missing (9, 10, 11, 12 as these are the leg244 joint) are mapped to a [-1, -1] position and the others are not mapped to245 [-1, -1].246 """247 test_model 
= HoleInTheCameraGame()248 test_image = cv2.imread("images/poses/first_mask.png")249 test_model.analyze_frame(test_image)250 test_model.parse_for_joint_positions()251 for key, value in test_model.joint_positions.items():252 if key in ["9", "10", "12", "13"]:253 if value != [-1, -1]:254 assert False255 else:256 if value == [-1, -1]:257 assert False258 assert True259def test_parse_for_joint_positions_no_legs_found_joint_positions():260 """261 Test for when analyzing the parsing through the deep pose result, the262 existing joints (joints not in the legs) are mapped to positions that lie263 within the pixel bounds of the inputted image.264 """265 test_model = HoleInTheCameraGame()266 test_image = cv2.imread("images/poses/first_mask.png")267 test_model.analyze_frame(test_image)268 test_model.parse_for_joint_positions()269 for key, value in test_model.joint_positions.items():270 if key not in ["9", "10", "12", "13"]:271 if value[0] > 640 or value[0] < 0 or value[1] > 480 or value[1] < 0:272 assert False273 assert True274def test_parse_for_joint_positions_half_upper_joints_detected():275 """276 Test that when analyzing and parsing through the deep pose result, the277 joints that are expected to be missing (1, 5, 6, 7, 8, 9, 10, 11, 12, 13,278 18 as these are the leg joint or upper body joints that are not included in279 the inputted image) are mapped to a [-1, -1] position and the others are280 not mapped to [-1, -1].281 """282 test_model = HoleInTheCameraGame()283 test_image = cv2.imread("images/poses/first_mask.png")[:, :325, :]284 test_model.analyze_frame(test_image)285 test_model.parse_for_joint_positions()286 for key, value in test_model.joint_positions.items():287 if key in ["1", "5", "6", "7", "8", "9", "10", "11", "12", "13", "17"]:288 if value != [-1, -1]:289 assert False290 else:291 if value == [-1, -1]:292 assert False293 assert True294def test_parse_for_joint_positions_half_upper_found_joint_positions():295 """296 Test for when analyzing the parsing through 
the deep pose result, the297 existing joints (joints not in the legs or half of the upper body) are298 mapped to positions that lie within the pixel bounds of the inputted image.299 """300 test_model = HoleInTheCameraGame()301 test_image = cv2.imread("images/poses/first_mask.png")[:, :325, :]302 test_model.analyze_frame(test_image)303 test_model.parse_for_joint_positions()304 for key, value in test_model.joint_positions.items():305 if key not in ['1', '5', '6', '7', '8', '9', '10', '11', '12', '13',306 '17']:307 if value[0] > 640 or value[0] < 0 or value[1] > 480 or value[1] < 0:308 assert False309 assert True310def test_parse_for_joint_positions_other_half_upper_joints_detected():311 """312 Test that when analyzing and parsing through the deep pose result, the313 joints that are expected to be missing (0, 2, 3, 4, 8, 9, 10, 11, 12, 13,314 14, 15, 16 as these are the leg joint or upper body joints that are not315 included in the inputted image) are mapped to a [-1, -1] position and the316 others are not mapped to [-1, -1].317 """318 test_model = HoleInTheCameraGame()319 test_image = cv2.imread("images/poses/first_mask.png")[:, 350:, :]320 test_model.analyze_frame(test_image)321 test_model.parse_for_joint_positions()322 for key, value in test_model.joint_positions.items():323 if key in ['0', '2', '3', '4', '8', '9', '10', '12', '13', '14', '15',324 '16']:325 if value != [-1, -1]:326 assert False327 else:328 if value == [-1, -1]:329 assert False330 assert True331def test_parse_for_joint_positions_other_half_upper_found_joint_positions():332 """333 Test for when analyzing the parsing through the deep pose result, the334 existing joints (joints not in the legs or half of the upper body) are335 mapped to positions that lie within the pixel bounds of the inputted image.336 """337 test_model = HoleInTheCameraGame()338 test_image = cv2.imread("images/poses/first_mask.png")[:, 350:, :]339 test_model.analyze_frame(test_image)340 test_model.parse_for_joint_positions()341 for 
key, value in test_model.joint_positions.items():342 if key not in ['0', '2', '3', '4', '8', '9', '10', '12', '13', '14',343 '15', '16']:344 if value[0] > 640 or value[0] < 0 or value[1] > 480 or value[1] < 0:345 assert False346 assert True347def test_compute_accuracy_white_image_total_score():348 """349 Test that the computed fit accuracy is 0 when a white image is analyzed and350 compared to existing joint positions. Based on this computed accuracy, the351 total score should be updated to be 0.352 """353 test_model = HoleInTheCameraGame()354 test_image = np.ones([480, 640, 3]) * 255355 test_csv = "mask_joint_positions/first_mask.csv"356 test_model.analyze_frame(test_image)357 test_model.parse_for_joint_positions()358 test_model.compute_accuracy(test_csv)359 assert test_model.total_score == 0360def test_compute_accuracy_white_image_trial_score():361 """362 Test that the computed fit accuracy is 0 when a white image is analyzed and363 compared to existing joint positions. Based on this computed accuracy, the364 trial score should be updated to be 0.365 """366 test_model = HoleInTheCameraGame()367 test_image = np.ones([480, 640, 3]) * 255368 test_csv = "mask_joint_positions/first_mask.csv"369 test_model.analyze_frame(test_image)370 test_model.parse_for_joint_positions()371 test_model.compute_accuracy(test_csv)372 assert test_model.trial_score == 0373def test_compute_accuracy_same_image_total_score():374 """375 Test that when an image is analyzed and compared to against joint positions376 obtained from the same image, the accuracy is a 100% match and the total377 score is updated to reflect such (100).378 """379 test_model = HoleInTheCameraGame()380 test_image = cv2.imread("images/poses/first_mask.png")381 test_csv = "mask_joint_positions/first_mask.csv"382 test_model.analyze_frame(test_image)383 test_model.parse_for_joint_positions()384 test_model.compute_accuracy(test_csv)385 assert test_model.total_score == 100.0386def 
test_compute_accuracy_same_image_trial_score():387 """388 Test that when an image is analyzed and compared to against joint positions389 obtained from the same image, the accuracy is a 100% match and the trial390 score is updated to reflect such (100).391 """392 test_model = HoleInTheCameraGame()393 test_image = cv2.imread("images/poses/first_mask.png")394 test_csv = "mask_joint_positions/first_mask.csv"395 test_model.analyze_frame(test_image)396 test_model.parse_for_joint_positions()397 test_model.compute_accuracy(test_csv)398 assert test_model.trial_score == 100.0399def test_compute_accuracy_same_image_total_score_three_trials():400 """401 Test that when an image is analyzed and compared to against joint positions402 obtained from the same image, the accuracy is a 100% match three times in a403 row. This is reflected in the total score as it is updated three times with404 a score of 100, yielding a total score of 300.405 """406 test_model = HoleInTheCameraGame()407 test_image = cv2.imread("images/poses/first_mask.png")408 test_csv = "mask_joint_positions/first_mask.csv"409 for _ in range(3):410 test_model.analyze_frame(test_image)411 test_model.parse_for_joint_positions()412 test_model.compute_accuracy(test_csv)413 assert test_model.total_score == 300.0414def test_compute_accuracy_same_image_trial_score_three_trials():415 """416 Test that when an image is analyzed and compared to against joint positions417 obtained from the same image, the accuracy is a 100% match three times in a418 row. 
For the trial score, this should only reflect 100 as it stores the419 most recent trial score.420 """421 test_model = HoleInTheCameraGame()422 test_image = cv2.imread("images/poses/first_mask.png")423 test_csv = "mask_joint_positions/first_mask.csv"424 for _ in range(3):425 test_model.analyze_frame(test_image)426 test_model.parse_for_joint_positions()427 test_model.compute_accuracy(test_csv)428 assert test_model.trial_score == 100.0429def test_computer_accuracy_diff_image_total_score():430 """431 Test that when an image is analyzed and compared to against a different432 image's joint positions, the accuracy is below 70%. This simulates a failed433 trial of the real game where a user's position is different than the434 expected position. This is reflected by the total score being less than 70.435 """436 test_model = HoleInTheCameraGame()437 test_image = cv2.imread("images/poses/second_mask.png")438 test_csv = "mask_joint_positions/first_mask.csv"439 test_model.analyze_frame(test_image)440 test_model.parse_for_joint_positions()441 test_model.compute_accuracy(test_csv)442 assert test_model.total_score < 70443def test_computer_accuracy_diff_image_trial_score():444 """445 Test that when an image is analyzed and compared to against a different446 image's joint positions, the accuracy is below 70%. This simulates a failed447 trial of the real game where a user's position is different than the448 expected position. 
This is reflected by the trial score being less than 70.449 """450 test_model = HoleInTheCameraGame()451 test_image = cv2.imread("images/poses/second_mask.png")452 test_csv = "mask_joint_positions/first_mask.csv"453 test_model.analyze_frame(test_image)454 test_model.parse_for_joint_positions()455 test_model.compute_accuracy(test_csv)456 assert test_model.trial_score < 70457def test_computer_accuracy_diff_image_total_score_three_trials():458 """459 Test that when an image is analyzed and compared to against a different460 image's joint positions, the accuracy is below 70%. This is done three461 different times to ensure that total score is properly updated and has462 a value less than the passing value for three total iterations.463 """464 test_model = HoleInTheCameraGame()465 test_image = cv2.imread("images/poses/second_mask.png")466 test_csv = "mask_joint_positions/first_mask.csv"467 for _ in range(3):468 test_model.analyze_frame(test_image)469 test_model.parse_for_joint_positions()470 test_model.compute_accuracy(test_csv)471 assert test_model.total_score < 210472def test_computer_accuracy_diff_image_trial_score_three_trials():473 """474 Test that when an image is analyzed and compared to against a different475 image's joint positions three different times, the accuracy is below 70%476 and the trial score only stores the accuracy of the most recent trial.477 """478 test_model = HoleInTheCameraGame()479 test_image = cv2.imread("images/poses/second_mask.png")480 test_csv = "mask_joint_positions/first_mask.csv"481 for _ in range(3):482 test_model.analyze_frame(test_image)483 test_model.parse_for_joint_positions()484 test_model.compute_accuracy(test_csv)485 assert test_model.trial_score < 70486def test_check_win_same_image():487 """488 This function checks to make sure the model correctly determines a win when489 an image is analyzed against joints generated from the same exact image.490 """491 test_model = HoleInTheCameraGame()492 test_image = 
cv2.imread("images/poses/first_mask.png")493 test_csv = "mask_joint_positions/first_mask.csv"494 test_model.analyze_frame(test_image)495 test_model.parse_for_joint_positions()496 test_model.compute_accuracy(test_csv)497 assert test_model.check_win()498def test_check_win_diff_image():499 """500 This function checks to make sure the model correctly determines a loss501 when an image is analyzed against joints generated from a different image.502 """503 test_model = HoleInTheCameraGame()504 test_image = cv2.imread("images/poses/second_mask.png")505 test_csv = "mask_joint_positions/first_mask.csv"506 test_model.analyze_frame(test_image)507 test_model.parse_for_joint_positions()508 test_model.compute_accuracy(test_csv)509 assert not test_model.check_win()510def test_check_win_two_iterations():511 """512 Tests to make sure the check_win function correctly determines a win513 followed by a loss in succession.514 """515 test_model = HoleInTheCameraGame()516 test_image = cv2.imread("images/poses/first_mask.png")517 test_csv = "mask_joint_positions/first_mask.csv"518 test_model.analyze_frame(test_image)519 test_model.parse_for_joint_positions()520 test_model.compute_accuracy(test_csv)521 if not test_model.check_win():522 assert False523 test_image = cv2.imread("images/poses/second_mask.png")524 test_model.analyze_frame(test_image)525 test_model.parse_for_joint_positions()526 test_model.compute_accuracy(test_csv)527 if test_model.check_win():528 assert False...

Full Screen

Full Screen

main_view.py

Source:main_view.py Github

copy

Full Screen

# --- main_view.py (excerpt, interior of a Tk view class) -----------------
# NOTE(review): the class header is outside this excerpt; the first four
# lines are the tail of a method that was truncated at the start (it
# finishes building the "Fetcher" data frame), and add_plot_frame is cut
# off at the end. Code is reproduced unchanged; only comments added.

        # Tail of a truncated method: title the "Fetcher" data frame and
        # attach the input form to it.
        lbl_frame = tk.Label(self.data_frame, text="Fetcher")
        lbl_frame.grid(row=0, column=0, columnspan=2, pady=20)
        lbl_frame.config(font=("Courier", 20))
        self.add_input_form(self.data_frame)

    def add_analyze_frame(self):
        """Create the "Analyzer" frame in grid column 1 and fill it with the plot frame."""
        self.analyze_frame = tk.Frame(self)
        self.analyze_frame.grid(row=0, column=1, padx=10, sticky='n')
        # Frame title
        lbl_frame = tk.Label(self.analyze_frame, text="Analyzer")
        lbl_frame.grid(row=0, column=0, columnspan=3, pady=20)
        lbl_frame.config(font=("Courier", 20))
        self.add_plot_frame(self.analyze_frame)

    def rm_analyze_frame(self):
        """Hide the analyzer frame; grid_forget keeps the widget alive for re-adding."""
        if self.analyze_frame is not None:
            self.analyze_frame.grid_forget()

    def rm_plot_frame(self):
        """Hide the plot frame without destroying it."""
        if self.plot_frame is not None:
            self.plot_frame.grid_forget()

    def add_plot_frame(self, frame):
        """Build the raised plot frame inside *frame* with the Period label and entry.

        NOTE(review): this method is truncated in the extracted source just
        after the Entry widget is created; more widgets likely follow.
        """
        # Period Label
        self.plot_frame = tk.Frame(frame, bd=10, relief='raised')
        self.plot_frame.grid(row=1, column=0, columnspan=3, ipadx=5, ipady=5)
        lbl_period = tk.Label(self.plot_frame, text='Period')
        lbl_period.grid(row=1, column=0, padx=(10, 0))
        # Period Entry
        self.period_entry = tk.Entry(self.plot_frame)

Full Screen

Full Screen

main.py

Source:main.py Github

copy

Full Screen

"""Wave measurement from video.

Reads a video, thresholds each frame to isolate whitecaps, detects wave
contours, tracks them across frames via the `mwt_functions` helpers, and
overlays running wavelength/period/frequency statistics on the display.

NOTE(review): reproduced unchanged from a garbled extraction; semantics of
the `mwt` helpers (Section, keep_contour, will_be_merged, draw, ...) are
assumed from their names -- confirm against mwt_functions.py.
"""
import mwt_functions as mwt
import numpy as np
import cv2 as cv
# Get Test Video
# cap = cv.VideoCapture('scene1.mp4')
# (commented-out path below contains Chinese folder names: "wave
#  measurement / dataset")
# video_temp = video_resize('F:/海浪测量/数据集/Pexels Videos 1757800.mp4')
cap = cv.VideoCapture("scene1.mp4")
last_frame = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
frame_rate = int(cap.get(cv.CAP_PROP_FPS))
# Create some random colors
# NOTE(review): `color` and `mask` are set up but not used in the visible
# code -- possibly leftovers from an optical-flow demo.
color = np.random.randint(0, 255, (100, 3))
# Grab First Frame w/ Grayscale Vers.
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
ret, old_thresh = cv.threshold(old_gray, 230, 255, cv.THRESH_BINARY)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
mask[..., 1] = 255
# Create Initial Variables
frame_num = 1            # 1-based index of the frame being processed
tracked_waves = []       # waves currently being tracked
recognized_waves = []    # dead waves that were confirmed as real waves
period = []              # frame numbers at which a wave crossed the test point
stable_wavelength = 0    # last non-zero average wavelength (fallback display)
while True:
    # Get Current Frame & Grayscale Vers.
    ret, frame = cap.read()
    # Check if at end
    if not ret:
        break
    gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
    #
    # Preprocessing
    #
    # Threshold image to only show the white part of waves
    ret, thresh = cv.threshold(gray, 205, 255, cv.THRESH_BINARY)
    # Resize Image
    # (downscale 4x; detections are mapped back with the 1/0.25 factor below)
    analyze_frame = cv.resize(thresh, None,fx=0.25,fy=0.25,interpolation=cv.INTER_AREA)
    # Denoise Thresholded image
    analyze_frame = cv.fastNlMeansDenoising(analyze_frame, 5)
    # Remove Small Artifacts
    analyze_frame = cv.morphologyEx(analyze_frame, cv.MORPH_OPEN, cv.getStructuringElement(cv.MORPH_RECT, (3, 3)))
    #
    # Get Waves (Detections)
    #
    sections = []
    # (2-value return is the OpenCV 4 signature; the commented 3-value line
    #  was for OpenCV 3)
    #_, contours, hierarchy = cv.findContours(
    contours, hierarchy = cv.findContours(
        image=analyze_frame,
        mode=cv.RETR_EXTERNAL,
        method=cv.CHAIN_APPROX_NONE,
        hierarchy=None,
        offset=None)
    for contour in contours:
        if mwt.keep_contour(contour):
            sections.append(mwt.Section(contour, frame_num))
    #
    # Update Waves (Tracking)
    #
    wave_points = []
    wave_slopes = []
    for wave in tracked_waves:
        wave.update_searchroi_coors()
        #
        analyze_frame = wave.update_points(analyze_frame, 1/0.25)
        if wave.update_death(frame_rate, frame_num):
            print("Wave Traveled " + str(wave.travel_dist) +
                  "m in " + str(wave.time_alive) + "s")
        # force-kill every wave on the final frame
        if frame_num == last_frame:
            wave.death = frame_num
        # update_centroid returning truthy records a period event for
        # this frame (wave passing the test point)
        if wave.update_centroid(frame_rate):
            period.append(frame_num)
        wave.update_boundingbox_coors()
        wave.update_displacement()
        wave.update_mass()
        wave.update_recognized()
        # Update Data Structures
        # frame = mwt.draw_line(frame, wave.slope, wave.intercept, 1 / 0.25)
        wave_points.append(wave.centroid)
        wave_slopes.append(wave.slope)
    # archive dead waves that were confirmed, then drop all dead waves
    dead_recognized_waves = [wave for wave in tracked_waves
                             if wave.death is not None
                             and wave.recognized is True]
    recognized_waves.extend(dead_recognized_waves)
    tracked_waves = [wave for wave in tracked_waves if wave.death is None]
    # newest-first so merge resolution prefers older waves as survivors
    tracked_waves.sort(key=lambda x: x.birth, reverse=True)
    for wave in tracked_waves:
        other_waves = [wav for wav in tracked_waves if not wav == wave]
        if mwt.will_be_merged(wave, other_waves):
            wave.death = frame_num
    tracked_waves = [wave for wave in tracked_waves if wave.death is None]
    tracked_waves.sort(key=lambda x: x.birth, reverse=False)
    #
    # Calculate Stats (Recognition)
    #
    # promote new detections that don't merge into an existing track
    for section in sections:
        if not mwt.will_be_merged(section, tracked_waves):
            tracked_waves.append(section)
    # Draw detection boxes on original frame for visualization.
    frame = mwt.draw(tracked_waves, frame, 1 / 0.25)
    #
    # Show Updated Frames
    #
    # Calculate Wavelength (avg)
    # NOTE(review): np.mean over an empty list yields nan with a
    # RuntimeWarning; the isnan fallback below covers that case.
    avg_slope = np.mean(wave_slopes)
    # distance between consecutive wave centroids along the average slope
    wavelengths = [mwt.calc_dist_lines(j, i, avg_slope) for i, j in zip(wave_points[:-1], wave_points[1:])]
    avg_wavelength = np.round(np.mean(wavelengths), 2)
    if np.isnan(avg_wavelength) or avg_wavelength == 0:
        avg_wavelength = stable_wavelength
    else:
        stable_wavelength = avg_wavelength
    # Calculate Period
    # Get diff. between each wave reaching point
    if len(period) > 1:
        periods = [(j-i)/frame_rate for i, j in zip(period[:-1], period[1:])]
        avg_period = np.round(np.mean(periods), 2)
        temp_freq = np.round(1/avg_period, 2)
        main_stats = "Stats\nWavelength: {}m\nPeriod: {}s\nTemporal Frequency: {} wave/s"\
            .format(np.round(avg_wavelength,2), avg_period, temp_freq)
    else:
        main_stats = "Stats\nWavelength: {}m\nPeriod: n/a\nTemporal Frequency: n/a"\
            .format(avg_wavelength)
    # render the stats block one line at a time (putText has no multiline)
    for i, j in enumerate(main_stats.split('\n')):
        frame = cv.putText(
            frame,
            text=j,
            org=(20, (50 + i * 45)),
            fontFace=cv.FONT_HERSHEY_COMPLEX_SMALL,
            fontScale=1.5,
            color=(200, 200, 200),
            thickness=2,
            lineType=cv.LINE_AA)
    # Show Test Point used for Period
    frame = cv.circle(frame, (1040, 600), 4, (0,0,0), thickness=4)
    cv.imshow("original", frame)
    cv.imshow("thresholded", analyze_frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
    old_frame = frame.copy()
    old_thresh = analyze_frame.copy()
    frame_num = frame_num + 1
# NOTE(review): no cv.destroyAllWindows() after release -- windows persist
# until interpreter exit; consider adding one.
cap.release()

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run ATX automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful