Best Python code snippet using tempest_python
Enemy.py
Source: Enemy.py
...
        frame = int(self.time / (Enemy.CYCLE / ENEMY_IMAGE_LENGTH))
        if frame != self.frame:
            self.frame = frame
            if self.face == 'r':
                self.update_image(self.IMAGES_RIGHT)
            elif self.face == 'u':
                self.update_image(self.IMAGES_BACK)
            elif self.face == 'l':
                self.update_image(self.IMAGES_LEFT)
            elif self.face == 'd':
                self.update_image(self.IMAGES_FRONT)
            elif self.face == 'rs':
                self.image = self.IMAGES_RIGHT[0]
            elif self.face == 'us':
                self.image = self.IMAGES_BACK[0]
            elif self.face == 'ls':
                self.image = self.IMAGES_LEFT[0]
            elif self.face == 'ds':
                self.image = self.IMAGES_FRONT[0]
            else:
                self.image = self.front_image.convert_alpha()
        self.dirty = 1

    def set_face(self, Enemy_face):
        if Enemy_face == 'u':
            self.update_image(self.IMAGES_FRONT)
            self.face = 'd'
        elif Enemy_face == 'd':
            self.update_image(self.IMAGES_BACK)
            self.face = 'u'
        elif Enemy_face == 'l':
            self.update_image(self.IMAGES_RIGHT)
            self.face = 'r'
        elif Enemy_face == 'r':
            self.update_image(self.IMAGES_LEFT)
            self.face = 'l'
        elif self.face == 'rs':
            self.update_image(self.IMAGES_LEFT)
        elif self.face == 'us':
            self.update_image(self.IMAGES_FRONT)
        elif self.face == 'ls':
            self.update_image(self.IMAGES_RIGHT)
        elif self.face == 'ds':
            self.update_image(self.IMAGES_BACK)

    def load_images_helper(self, imageArray, sheet):
        # key = sheet.get_at((0,0))
        alphabg = (23, 23, 23)
        for i in range(0, 4):
            surface = PG.Surface((100, 100))
            surface.set_colorkey(alphabg)
            surface.blit(sheet, (0, 0), (i * 100, 0, 100, 100))
            imageArray.append(surface)
        return imageArray

    # this will all end up in the key handler
    def update_image(self, imageArray):
        try:
            self.image = imageArray[self.frame].convert_alpha()
        except IndexError:
            self.image = self.front_image.convert_alpha()
...
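The frame index above comes from dividing elapsed time by the per-frame duration CYCLE / ENEMY_IMAGE_LENGTH. Below is a minimal, self-contained sketch of that timing arithmetic; the CYCLE value and the modulo wrap are assumptions for illustration (the excerpt instead relies on the IndexError fallback in update_image), and no pygame objects are needed.

# Standalone sketch of the time-to-frame mapping used in Enemy.
# CYCLE = 1.0 is an assumed value; ENEMY_IMAGE_LENGTH matches the 4-frame sheets.
CYCLE = 1.0               # seconds per full animation loop (assumed)
ENEMY_IMAGE_LENGTH = 4    # frames per loop

def frame_for_time(elapsed):
    """Return the sprite-sheet frame index for a given elapsed time."""
    frame_duration = CYCLE / ENEMY_IMAGE_LENGTH
    return int((elapsed % CYCLE) / frame_duration)  # wrap added here for illustration

if __name__ == "__main__":
    for t in (0.0, 0.3, 0.6, 0.9, 1.2):
        print(f"t={t:.1f}s -> frame {frame_for_time(t)}")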
camera_tool.py
Source: camera_tool.py
...
K = 107
L = 108

def normalize_in_range(value, min, max, b_range):
    return (((value - 0) * (max - min)) / (b_range - 0)) + min

def update_image(val):
    record_params = cv2.getTrackbarPos('Record params', title_window)
    fp = cv2.getTrackbarPos('Focal length', title_window)
    tilt_angle = cv2.getTrackbarPos('Tilt angle', title_window)
    pan_angle = cv2.getTrackbarPos('Pan angle', title_window)
    roll_angle = cv2.getTrackbarPos('Roll angle', title_window)
    xloc = cv2.getTrackbarPos('Camera loc x', title_window)
    yloc = cv2.getTrackbarPos('Camera loc y', title_window)
    zloc = cv2.getTrackbarPos('Camera loc z', title_window)
    fp = normalize_in_range(fp, 1000, 6000, bar_range)
    xloc = normalize_in_range(xloc, 46.2, 57.2, bar_range)
    yloc = normalize_in_range(yloc, -66.07020, -16.74178, bar_range)
    zloc = normalize_in_range(zloc, 10.1387, 23.01126, bar_range)
    zloc = yloc * (-.4)
    tilt_angle = normalize_in_range(tilt_angle, -20., -5., bar_range)
    pan_angle = normalize_in_range(pan_angle, -60., 60., pan_bar_range)
    roll_angle = normalize_in_range(roll_angle, -1., 1., bar_range)
    params = np.array([
        image_center_x,
        image_center_y,
        fp,
        tilt_angle,
        pan_angle,
        roll_angle,
        xloc,
        yloc,
        zloc
    ])
    if record_params == 1:
        camera_samples.append(params)
    base_rotation = RotationUtil.rotate_y_axis(0) @ RotationUtil.rotate_z_axis(roll_angle) @ \
                    RotationUtil.rotate_x_axis(-90)
    pan_tilt_rotation = RotationUtil.pan_y_tilt_x(pan_angle, tilt_angle)
    rotation = pan_tilt_rotation @ base_rotation
    rot_vec, _ = cv2.Rodrigues(rotation)
    tilt, pan, roll = rot_vec[0].item(), rot_vec[1].item(), rot_vec[2].item()
    camera_params = np.array([
        image_center_x,
        image_center_y,
        fp,
        tilt,
        pan,
        roll,
        xloc,
        yloc,
        zloc
    ])
    camera = Camera(camera_params)
    homography = camera.homography()
    edge_map = edge_map_from_homography(homography,
                                        binary_court,
                                        image_resolution)
    # im = cv2.imread('court.jpg')
    # edge_map2 = cv2.warpPerspective(im, homography, image_resolution, flags=cv2.INTER_LINEAR)
    text = f"focal length: {round(camera.focal_length, 3)} \n" \
           f"cam_loc_X: {round(camera.camera_center_x, 3)} \n" \
           f"cam_loc_Y: {round(camera.camera_center_y, 3)} \n" \
           f"cam_loc_Z: {round(camera.camera_center_z, 3)} \n" \
           f"tilt: {round(tilt_angle, 3):.3f} \n" \
           f"pan: {round(pan_angle, 3):.3f} \n" \
           f"roll: {round(roll_angle, 3):.3f} \n"
    y0, dy = 30, 20
    for i, line in enumerate(text.split('\n')):
        y = y0 + i * dy
        cv2.putText(edge_map, line, (20, y),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(edge_map, f'{np.round(homography[0, 0], 3):<8} {np.round(homography[0, 1], 3):=10} {np.round(homography[0, 2], 3):>10}',
                (900, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(edge_map, f'{np.round(homography[1, 0], 3):<8} {np.round(homography[1, 1], 3):=10} {np.round(homography[1, 2], 3):>10}',
                (900, 50), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(edge_map, f'{np.round(homography[2, 0], 3):<8} {np.round(homography[2, 1], 3):=10} {np.round(homography[2, 2], 3):>10}',
                (900, 70), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.circle(edge_map, (int(camera.image_center_x), int(camera.image_center_y)), 2, (0, 255, 0), 3)
    cv2.putText(edge_map, f'Samples: {len(camera_samples)}', (600, y0), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.imshow(title_window, edge_map)

def save_camera_samples():
    samples = np.array(camera_samples)
    timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    num_of_samples = len(camera_samples)
    filename = f'saved_camera_param_data/{timestamp}-{num_of_samples}.npy'
    np.save(filename, samples)

if __name__ == '__main__':
    image_resolution = (1280, 720)
    image_center_x = image_resolution[0] / 2
    image_center_y = image_resolution[1] / 2
    focal_point = 0
    tilt_angle = int(bar_range / 2)
    pan_angle = int(pan_bar_range / 2)
    roll_angle = int(bar_range / 2)
    camera_loc_x = int(bar_range * .57)
    camera_loc_y = int(bar_range / 2)
    camera_loc_z = int(bar_range / 2)
    record_params = 0
    title_window = 'Camera Tool'
    binary_court = sio.loadmat('worldcup2014.mat')
    cv2.namedWindow(title_window)
    cv2.createTrackbar('Record params', title_window, record_params, 1, update_image)
    cv2.createTrackbar('Focal length', title_window, focal_point, bar_range, update_image)
    cv2.createTrackbar('Tilt angle', title_window, tilt_angle, bar_range, update_image)
    cv2.createTrackbar('Pan angle', title_window, pan_angle, pan_bar_range, update_image)
    cv2.createTrackbar('Roll angle', title_window, roll_angle, bar_range, update_image)
    cv2.createTrackbar('Camera loc x', title_window, camera_loc_x, bar_range, update_image)
    cv2.createTrackbar('Camera loc y', title_window, camera_loc_y, bar_range, update_image)
    cv2.createTrackbar('Camera loc z', title_window, camera_loc_z, bar_range, update_image)
    update_image(1)
    while 1:
        key = cv2.waitKey(0)
        if key == Q:    # quit
            break
        elif key == W:
            val = cv2.getTrackbarPos('Tilt angle', title_window)
            cv2.setTrackbarPos('Tilt angle', title_window, val + 1)
            update_image(1)
        elif key == S:
            val = cv2.getTrackbarPos('Tilt angle', title_window)
            cv2.setTrackbarPos('Tilt angle', title_window, val - 1)
            update_image(1)
        elif key == A:
            val = cv2.getTrackbarPos('Pan angle', title_window)
            cv2.setTrackbarPos('Pan angle', title_window, val - 1)
            update_image(1)
        elif key == D:
            val = cv2.getTrackbarPos('Pan angle', title_window)
            cv2.setTrackbarPos('Pan angle', title_window, val + 1)
            update_image(1)
    if len(camera_samples) > 0:
        save_camera_samples()
        print('Camera samples saved!')
...
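OpenCV trackbars only report integer positions between 0 and their maximum, so normalize_in_range rescales each slider linearly into physical units (focal length, angles, camera position). Below is a small self-contained sketch of that mapping; bar_range = 100 is an assumed value, since the real constant is defined above the excerpt and not shown.

# Sketch of the slider-to-physical-units mapping from camera_tool.py.
# bar_range = 100 is assumed; the actual constant lives outside the excerpt.
bar_range = 100

def normalize_in_range(value, lo, hi, b_range):
    # Linear map of value in [0, b_range] onto [lo, hi].
    return (value * (hi - lo)) / b_range + lo

if __name__ == "__main__":
    # A slider at its midpoint lands at the middle of each physical range.
    print(normalize_in_range(50, 1000, 6000, bar_range))   # focal length -> 3500.0
    print(normalize_in_range(50, -20.0, -5.0, bar_range))  # tilt angle   -> -12.5
    print(normalize_in_range(0, 46.2, 57.2, bar_range))    # camera x     -> 46.2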
Image.py
Source: Image.py
"""
#####################################
## IT 441 Computer Graphics
## Instructor: Nitin Raje
## Implemented By: Shalin Shah
## ID : 201101179
#####################################
"""

from PIL import Image
from PIL import ImageChops
from PIL import ImageFilter
import webbrowser
from PIL import ImageDraw

def genTriangle(data, steps, update_image, k):
    '''
    The triangle and its subtriangles are calculated here.
    '''
    # draw triangles each step through
    update_image.line((data[0], data[1]))
    update_image.line((data[1], data[2]))
    update_image.line((data[0], data[2]))

    # next triangle formed by connecting the midpoints of each of the sides
    x1 = (data[0][0] + data[1][0]) / 2
    y1 = (data[0][1] + data[1][1]) / 2

    x2 = (data[1][0] + data[2][0]) / 2
    y2 = (data[1][1] + data[2][1]) / 2

    x3 = (data[2][0] + data[0][0]) / 2
    y3 = (data[2][1] + data[0][1]) / 2

    # updates data in next recursion
    data2 = ((x1, y1), (x2, y2), (x3, y3))

    # loop through until step limit is reached
    k += 1
    if k <= steps:
        genTriangle((data[0], data2[0], data2[2]), steps, update_image, k)
        genTriangle((data[1], data2[0], data2[1]), steps, update_image, k)
        genTriangle((data[2], data2[1], data2[2]), steps, update_image, k)

def draw(image):
    return ImageDraw.Draw(image)

# higher steps gives more detail
# test with values of 1 to 10
steps = 6

# the three x,y data points for the starting equilateral triangle
data = ((0, 400), (400, 400), (200, 0))

# picture canvas creation uses size tuple given in data[1]
size = data[1]
picture = Image.new('1', size, color="red")
update_image = draw(picture)

# draw the triangle and calculate next triangle corner coordinates
genTriangle(data, steps, update_image, 0)

# save the final image file and then view with an image viewer
imagename = "TRIANGLE_ORIGINAL.jpg"
picture.save(imagename)

im1 = Image.open("TRIANGLE_ORIGINAL.jpg")

im2 = im1.filter(ImageFilter.BLUR)
im2.save('TRIANGLE_BLURRED.jpg', "JPEG")

# invert() returns a new image, so rebind im1 for the inversion to take effect
im1 = ImageChops.invert(im1)
im3 = im1.filter(ImageFilter.EMBOSS)
im3.save('TRIANGLE_EMBOSSED_INVERTED.jpg', "JPEG")
...
