Best Python code snippet using fMBT_python
sift.py
Source: sift.py
...
    logging.info("Thread %s: starting", name)
    videoCapture.run(shouldStop, **kwargs)
    logging.info("Thread %s: finishing", name)

# Starts the video capture
def startVideoCapture(name, **kwargs):
    global videoCaptureThread
    # Check if a video capture thread is already running
    if shouldStop.get() != 0:
        print("startVideoCapture(): Video capture is already running, not starting again")
        return
    # Start the video capture thread
    videoCaptureThread = thread_with_exception.thread_with_exception(
        name=name, target=videoCaptureThreadFunction, args=(name,), kwargs_=kwargs)
    videoCaptureThread.start()

def onSetVideoCapture(cap):
    customVideoCapture.setOrigVideoCap(cap)

def onFrame(frame):
    customVideoCapture.q.put(frame)

# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser(description='The bad boy SIFT, but implemented in Python.')
parser.add_argument('--skip', type=int, nargs='+', default=0,
                    help="initial skip")
parser.add_argument('--show-preview-window', action='store_true', default=False,
                    help='whether to show the preview window')
parser.add_argument('--frameskip', type=int, nargs='+', default=1,
                    help="skip this number of frames after every frame processed")
# Example invocation: --save-first-image --video-file-data-source --video-file-data-source-path Data/fullscale1/Derived/SIFT/output.mp4 --no-sky-detection
parser.add_argument('--save-first-image', action='store_true', default=True,
                    help='whether to save the first image')
parser.add_argument('--video-file-data-source', action='store_true', default=False,
                    help='read frames from a video file instead of the live camera')
parser.add_argument('--video-file-data-source-path', type=str, nargs='+', default=None,
                    help="path of the video file to use as the data source")
parser.add_argument('--no-sky-detection', action='store_true', default=False,
                    help='disable sky detection')
videoFilename = None
# https://stackoverflow.com/questions/12834785/having-options-in-argparse-with-a-dash
namespace = parser.parse_args()  # parses argv by default, but a list can be passed instead
print(namespace)
showPreviewWindow = namespace.show_preview_window
skip = namespace.skip
frameSkip = namespace.frameskip[0] if isinstance(namespace.frameskip, list) else namespace.frameskip  # HACK
shouldRunSkyDetection = not namespace.no_sky_detection
videoFileDataSourcePath = namespace.video_file_data_source_path[0] if isinstance(namespace.video_file_data_source_path, list) else namespace.video_file_data_source_path  # HACK
videoFileDataSource = namespace.video_file_data_source

def runOnTheWayDown(capAPI, pSave):
    knn_matcher2.mode = 1
    knn_matcher2.grabMode = 1
    knn_matcher2.shouldRunSkyDetection = shouldRunSkyDetection
    knn_matcher2.shouldRunUndistort = True
    knn_matcher2.skip = skip
    knn_matcher2.videoFilename = None
    knn_matcher2.showPreviewWindow = showPreviewWindow
    knn_matcher2.reader = capAPI
    knn_matcher2.frameSkip = frameSkip
    knn_matcher2.waitAmountStandard = 1  # (only used when showPreviewWindow == True)
    # Alternative SIFT parameters:
    # knn_matcher2.nOctaveLayers = 9
    # knn_matcher2.contrastThreshold = 0.03
    # knn_matcher2.sigma = 0.8

    knn_matcher2.nfeatures = 0
    knn_matcher2.nOctaveLayers = 10
    knn_matcher2.contrastThreshold = 0.02
    knn_matcher2.edgeThreshold = 10
    knn_matcher2.sigma = 0.8
    knn_matcher2.sift = cv2.xfeatures2d.SIFT_create(knn_matcher2.nfeatures, knn_matcher2.nOctaveLayers,
                                                    knn_matcher2.contrastThreshold, knn_matcher2.edgeThreshold,
                                                    knn_matcher2.sigma)

    rets = knn_matcher2.run(pSave)
    return rets

if __name__ == '__main__':
    name = "SIFT_Cam"
    signal.signal(signal.SIGINT, signal_handler)

    now = datetime.now()  # current date and time
    date_time = now.strftime("%m_%d_%Y_%H_%M_%S")
    o1 = now.strftime("%Y-%m-%d_%H_%M_%S_%Z")
    outputFolderPath = os.path.join('.', 'dataOutput', o1)
    pSave = outputFolderPath

    try:
        # NOTE: first image given should match what the sky detector chooses, so we re-set firstImage and firstImageFilename here
        if videoFileDataSource:
            capOrig = cv2.VideoCapture(videoFileDataSourcePath)
            startVideoCapture(name, capOrig=capOrig, onFrame=onFrame, fps=capOrig.get(cv2.CAP_PROP_FPS),
                              onSetVideoCapture=onSetVideoCapture, outputFolderPath=pSave)
        else:
            startVideoCapture(name, onFrame=onFrame, fps=5, onSetVideoCapture=onSetVideoCapture,
                              outputFolderPath=pSave)

        accMat, w, h, firstImage, firstImageOrig, firstImageFilename = runOnTheWayDown(customVideoCapture, pSave)
    except knn_matcher2.EarlyExitException as e:
        accMat = e.acc
        w = e.w
        h = e.h
        firstImage = e.firstImage
...
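This snippet drives an external knn_matcher2 module that is not shown on this page. For orientation, here is a minimal, self-contained sketch of the underlying technique: SIFT keypoint extraction followed by k-nearest-neighbour descriptor matching with Lowe's ratio test. It assumes an OpenCV build where SIFT lives in the main module as cv2.SIFT_create (OpenCV >= 4.4); older contrib builds expose it as cv2.xfeatures2d.SIFT_create, as used above. The image file names are placeholders.

import cv2

# Placeholder inputs; any two overlapping views of the same scene will do
img1 = cv2.imread('frame_a.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('frame_b.png', cv2.IMREAD_GRAYSCALE)

# Same parameter order as the SIFT_create call above:
# nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma
sift = cv2.SIFT_create(0, 10, 0.02, 10, 0.8)

# Detect keypoints and compute 128-dimensional SIFT descriptors
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# KNN matching (k=2), then Lowe's ratio test to discard ambiguous
# matches -- the standard filtering step for SIFT descriptors
matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.75 * n.distance]
print(len(good), "good matches out of", len(matches))

Presumably the filtered matches are what a pipeline like knn_matcher2 feeds into a transform estimator (e.g. cv2.findHomography) to build up the accumulated matrix accMat returned above.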
gui.py
Source: gui.py
...
last_frame1 = np.zeros((480, 640, 3), dtype=np.uint8)
global cap1
show_text = [0]

# c:/users/rajus/pycharmprojects/pythonproject/venv/lib/site-packages
def startVideoCapture():
    start = timer()
    cap1 = cv2.VideoCapture(0)
    # width, height = 600, 500
    # cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    # cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if not cap1.isOpened():
        print("can't open camera 1")
    flag1, frame1 = cap1.read()
    print("capture time " + str(timer() - start))
    frame1 = cv2.resize(frame1, (600, 500))
    print("resize time " + str(timer() - start))
    # c:/users/rajus/pycharmprojects/pythonproject/
    bounding_box = cv2.CascadeClassifier('venv/lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
    gray_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    num_faces = bounding_box.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
    print("bounding box time " + str(timer() - start))
    for (x, y, w, h) in num_faces:
        cv2.rectangle(frame1, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        print("crop time " + str(timer() - start))
        prediction = emotion_model.predict(cropped_img)
        print("detection time " + str(timer() - start))

        maxindex = int(np.argmax(prediction))
        print(emotion_dict[maxindex])
        # cv2.putText(frame1, emotion_dict[maxindex], (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        show_text[0] = maxindex
    if flag1 is None:
        print("Major error!")
    elif flag1:
        global last_frame1
        last_frame1 = frame1.copy()
        pic = cv2.cvtColor(last_frame1, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(pic)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain.imgtk = imgtk
        lmain.configure(image=imgtk)
        print("paint time 1 " + str(timer() - start))
        paintDetectedEmotion()
        print("paint time " + str(timer() - start))
        lmain.after(750, startVideoCapture)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        exit()

def paintDetectedEmotion():
    frame2 = cv2.imread(emoji_dist[show_text[0]])
    pic2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
    img2 = Image.fromarray(pic2)
    imgtk2 = ImageTk.PhotoImage(image=img2)
    lmain2.imgtk2 = imgtk2
    lmain3.configure(text=emotion_dict[show_text[0]], font=('arial', 45, 'bold'))
    lmain2.configure(image=imgtk2)
    # lmain2.after(10, paintDetectedEmotion)

if __name__ == '__main__':
    root = tk.Tk()
    # img = ImageTk.PhotoImage(Image.open("logo.png"))
    # heading = Label(root, image=img, bg='black')
    # heading.pack()
    # heading2 = Label(root, text="Photo to Emoji", pady=20, font=('arial', 45, 'bold'), bg='black', fg='#CDCDCD')
    # heading2.pack()
    lmain = tk.Label(master=root, padx=50, bd=10)
    lmain2 = tk.Label(master=root, bd=10)
    lmain3 = tk.Label(master=root, bd=10, fg="#CDCDCD", bg='black')
    lmain.pack(side=LEFT)
    lmain.place(x=50, y=50)
    lmain3.pack()
    lmain3.place(x=960, y=50)
    lmain2.pack(side=RIGHT)
    lmain2.place(x=900, y=150)
    root.title("Photo To Emoji")
    root.geometry("1400x900+100+10")
    root['bg'] = 'black'
    exitbutton = Button(root, text='Quit', fg="red", command=root.destroy, font=('arial', 25, 'bold')).pack(side=BOTTOM)
    startVideoCapture()
...
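Two details of gui.py are worth noting. First, it schedules itself on the Tk event loop with lmain.after(750, startVideoCapture) instead of looping, so the GUI stays responsive between frames. Second, it loads the Haar cascade from a hard-coded venv path; the pip opencv-python package exposes the bundled cascade directory as cv2.data.haarcascades, which is more portable. A minimal sketch of the same face-detection step using that path (file names are illustrative):

import cv2

# The opencv-python wheel ships the Haar cascade XML files and exposes
# their directory as cv2.data.haarcascades, avoiding hard-coded venv paths
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)  # default webcam
ret, frame = cap.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Same tuning as gui.py: 1.3x pyramid scale steps, 5 neighbours required
    faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imwrite('faces.png', frame)  # illustrative output file
cap.release()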
camSub.py
Source: camSub.py
...
"""
def startVideoCapture():
        # pub = rospy.Publisher("webcam", Image, queue_size=1)
        # rospy.init_node("spektrometer", anonymous=False)
        # rate = rospy.Rate(10)
        """
        while not rospy.is_shutdown():
            ret, frame = cap.read()
            if not ret:
                break
            msg = bridge.cv2_to_imgmsg(frame, "bgr8")
            pub.publish(msg)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            if rospy.is_shutdown():
                cap.release()

        if __name__ == '__main__':
            try:
                talker()
            except rospy.ROSInterruptException:
                pass
        """
...
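The camSub.py snippet is almost entirely commented out: the publisher loop sits inside a triple-quoted string within startVideoCapture. For reference, a working version of the same rospy + cv_bridge publishing pattern would look roughly like the sketch below. The node name ("spektrometer"), topic ("webcam"), and encoding ("bgr8") come from the commented-out code; everything else, including the rate.sleep() call that the original never makes, is an assumption.

import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

def talker():
    # Topic and node names taken from the commented-out code above
    pub = rospy.Publisher("webcam", Image, queue_size=1)
    rospy.init_node("spektrometer", anonymous=False)
    rate = rospy.Rate(10)  # assumed ~10 Hz; the original creates the Rate but never sleeps on it
    cap = cv2.VideoCapture(0)
    bridge = CvBridge()
    while not rospy.is_shutdown():
        ret, frame = cap.read()
        if not ret:
            break
        # Convert the BGR OpenCV frame to a sensor_msgs/Image and publish it
        pub.publish(bridge.cv2_to_imgmsg(frame, "bgr8"))
        rate.sleep()
    cap.release()

if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass

The cv2.waitKey(1) check in the original only makes sense when an OpenCV window is open, so it is dropped here; shutdown is handled by rospy.is_shutdown() and ROSInterruptException instead.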
