How to use the touch_text method in robotframework-androidlibrary

Best Python code snippets using robotframework-androidlibrary
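
The snippets on this page are general-purpose touch-handling programs that define their own touch_text variables; none of them calls robotframework-androidlibrary directly. For the keyword named in the title, the following is a minimal sketch only: it assumes the package exposes an AndroidLibrary class whose touch_text keyword taps an on-screen element by its visible text, so both the import path and the signature should be checked against the keyword documentation of your installed version.

# Hedged sketch, not verified against any specific release of
# robotframework-androidlibrary: assumes an AndroidLibrary class with a
# touch_text keyword that taps the first element showing the given text.
from AndroidLibrary import AndroidLibrary

lib = AndroidLibrary()
# In a Robot Framework suite the same call would be written as the keyword
#     Touch Text    Sign in
# From plain Python, keywords are ordinary methods on the library instance:
lib.touch_text('Sign in')  # 'Sign in' is a hypothetical label, for illustration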

on_world_touch_demo.py

Source: on_world_touch_demo.py (GitHub)

#!/usr/bin/env python
'''
example to show optical flow
USAGE: video_correlation.py [<video_source>]
Keys:
 ESC - exit
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
# from cv2 import cuda
import EasyPySpin
import queue
import mediapipe as mp
import sys

np.random.seed(42)

# global parameters
touch_threshold = 5        # vector magnitude. The lower, the more sensitive the detection.
centripetal_threshold = 2  # vector variance. The higher, the less likely hand motion is detected as speckle motion (false positive).
forceScale = int(5)        # scalar. The bigger, the longer vectors look. Does not affect flow.
lineWidth = int(5)         # line width of vectors.

mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles

# configuration notes:
# laser torch + diffuser, on partition: touch_threshold = 3, centripetal_threshold = 1
# laser projector + concave only, on partition: touch_threshold = 3, centripetal_threshold = 1
# laser projector + concave + diffuser, on partition: touch_threshold = 3.5, centripetal_threshold = 2.
#   Can cover a larger area, but SNR is lower -> var is large when stepping on the floor.
# laser projector + concave only, on wall: active_threshold = 6, touch_threshold = 7, centripetal_threshold = 0.5


def draw_touching_point(wrist, index, width, height):
    # Build a mask ellipse between the wrist and the index fingertip so that
    # hand motion in that region is excluded from the speckle-flow analysis.
    blank_image = np.zeros((int(height), int(width), 3), np.uint8)

    # drawing parameters
    white = (255, 255, 255)
    wrist_co = (int(wrist.x*width), int(wrist.y*height))
    # print('wrist', wrist_co)
    index_co = (int(index.x*width), int(index.y*height))
    coeff = 3
    ellipse_center = (int(coeff*wrist_co[0] + (1 - coeff)*index_co[0]),
                      int(coeff*wrist_co[1] + (1 - coeff)*index_co[1]))
    longeraxis = int(np.sqrt((index_co[0] - ellipse_center[0])**2
                             + (index_co[1] - ellipse_center[1])**2))
    axesLength = (longeraxis, 30)
    angle = np.arctan2(wrist_co[1] - index_co[1], wrist_co[0] - index_co[0])
    angle = int(angle*180/np.pi)
    # print(angle)
    # wrist
    # image = cv.circle(image, wrist_co, 5, (0, 255, 0), 5)
    # index fingertip
    # image = cv.circle(image, index_co, 5, (0, 255, 0), 5)
    # ellipse
    blank_image = cv.ellipse(blank_image, ellipse_center, axesLength, angle, 0, 360, white, -1)
    cropped_image = blank_image[97:394, 119:515]
    dim = (2048, 1536)
    resized = cv.resize(cropped_image, dim, interpolation=cv.INTER_AREA)
    resized = cv.cvtColor(resized, cv.COLOR_BGR2GRAY).astype("float32")
    return resized, index_co


def nothing(x):
    pass


def cart2pol(z):
    rho = np.sqrt(z[0]**2 + z[1]**2)
    phi = np.arctan2(z[1], z[0])
    return (rho, phi)


def touch_detection(flow, yy, xx, isTouched, img_mask):
    y, x = flow.shape[:2]

    isEnter = False
    isExit = False
    flow_array_original = flow
    flow_array = []
    for iy in range(y):
        for ix in range(x):
            if img_mask[yy[iy], xx[ix]] == 0:
                mag, angle = cart2pol(flow_array_original[iy][ix])
                if mag > touch_threshold:
                    flow_array.append([mag, angle])

    if flow_array == []:
        flow_array.append([0, 0])
    flow_array = np.array(flow_array)
    flow_array_var = np.var(flow_array[:, 1])
    flow_array_magnitude = flow_array[:, 0].mean()
    if flow_array_magnitude > touch_threshold and flow_array_var > centripetal_threshold:
        touch_text = "Speckle motion: touched"
        if not isTouched:
            isEnter = True
            isTouched = True
    else:
        touch_text = "Speckle motion: no touch"
        if isTouched:
            isExit = True
            isTouched = False

    # touch_text = "var:" + str(round(flow_array_var, 2)) + " mag:" + str(round(flow_array_magnitude, 2))
    return touch_text, isTouched, isEnter, isExit


def draw_flow(img, flow, yy, xx, touch_text, img_mask):
    h, w = img.shape[:2]
    y, x = flow.shape[:2]
    mask = np.zeros((h, w, 3), np.uint8)

    for iy in range(y):
        for ix in range(x):
            if img_mask[yy[iy], xx[ix]] == 0:
                start_point = (xx[ix], yy[iy])
                end_point = (xx[ix] + forceScale*int(flow[iy][ix][0]),
                             yy[iy] + forceScale*int(flow[iy][ix][1]))
                # print(start_point, end_point)
                cv.arrowedLine(mask, start_point, end_point, (0, 255, 0), lineWidth)
    masked_image = cv.addWeighted(img, 1, mask, 1, 0)

    # text display
    font = cv.FONT_HERSHEY_SIMPLEX
    org = (600, 100)
    fontScale = 2
    color = (0, 255, 0)  # green in BGR
    thickness = 5  # line thickness in px

    masked_image = cv.putText(masked_image, touch_text, org, font,
                              fontScale, color, thickness, cv.LINE_AA)
    return masked_image


cv.namedWindow('Touch Detection', cv.WINDOW_NORMAL)


def main():
    try:
        fn = sys.argv[1]
        cam = cv.VideoCapture(fn)
        mode = 'video'
    except IndexError:
        fn = 0
        mode = 'camera'
        cam = EasyPySpin.VideoCapture(0)  # FLIR camera observing the speckle field
    cap = cv.VideoCapture(0, cv.CAP_DSHOW)  # ordinary webcam for hand tracking

    _ret, prev = cam.read()
    gpu_previous = cv.cuda_GpuMat()
    gpu_previous.upload(prev)

    interval = 1
    while not cam.isOpened():
        _ret, prev = cam.read()
        gpu_previous.upload(prev)

    frameWidth = int(cam.get(cv.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cam.get(cv.CAP_PROP_FRAME_HEIGHT))
    width = cap.get(cv.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv.CAP_PROP_FRAME_HEIGHT)
    print(width, height)
    gpu_cur = cv.cuda_GpuMat()
    gpu_ref = cv.cuda_GpuMat()
    gpu_ref = gpu_previous
    count = 0
    no_row_grids = 15  # 30
    no_col_grids = 30  # 60
    flow = np.zeros([no_row_grids, no_col_grids, 2])
    # flow_val = []
    nc = frameWidth   # 2048
    nr = frameHeight  # 1536
    vel_est_delta = 1  # delta of frames over which velocity is estimated
    yy = np.arange(0, nr, int(nr/no_row_grids))
    xx = np.arange(0, nc, int(nc/no_col_grids))
    queue_vel = queue.Queue(vel_est_delta)
    intensity_mask = cv.imread('29/29_mask.png')
    intensity_mask = cv.cvtColor(intensity_mask, cv.COLOR_BGR2GRAY).astype("float32")
    img_mask = 255*intensity_mask
    img_mask = img_mask.clip(0, 255).astype("uint8")
    UI = np.zeros((480, 640, 3), np.uint8)  # 297,396
    UI.fill(50)
    isTouched = False
    isEnter = False
    isExit = False
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cam.isOpened():
            _ret, img = cam.read()
            gpu_cur.upload(img)
            success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                break
            # To improve performance, optionally mark the image as not
            # writeable to pass by reference.
            image.flags.writeable = False
            image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
            results = hands.process(image)
            # Draw landmark annotation on the image.
            image.flags.writeable = True
            image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
            # print(bool(results.multi_hand_landmarks))
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    # mp_drawing.draw_landmarks(
                    #     image,
                    #     hand_landmarks,
                    #     mp_hands.HAND_CONNECTIONS,
                    #     mp_drawing_styles.get_default_hand_landmarks_style(),
                    #     mp_drawing_styles.get_default_hand_connections_style())
                    hand_mask, index_co = draw_touching_point(hand_landmarks.landmark[0],
                                                              hand_landmarks.landmark[8], width, height)
                    # print(index_co, 'index_co')
                    # GREEN
                    # image = cv.circle(image, index_co, 5, (0, 255, 0), 5)

                    # RED: paint a dot where the finger entered a touch
                    if isEnter:
                        UI = cv.circle(UI, index_co, 10, (0, 0, 255), -1)
                        print('red', index_co)
                    # if isExit:
                    #     UI.fill(50)
                    img_mask = 255*(intensity_mask + hand_mask)
                    img_mask = img_mask.clip(0, 255).astype("uint8")

            cv.imshow('Mask', cv.resize(img_mask, (640, 480), interpolation=cv.INTER_AREA))
            cv.imshow('UI', cv.resize(UI[97:394, 119:515], (800, 600), interpolation=cv.INTER_AREA))

            try:
                gpu_flow_create = cv.cuda_FarnebackOpticalFlow.create(3, 0.5, False, 15, 3, 5, 1.2, 0)
                gpu_flow = cv.cuda_FarnebackOpticalFlow.calc(gpu_flow_create, gpu_ref, gpu_cur, None)
            except IndexError:
                continue
            flow = gpu_flow.download()
            flow_sparse = flow[::int(nr/no_row_grids), ::int(nc/no_col_grids), :]
            count = count + interval

            cv.imshow('Webcam', image)
            if count >= 2*vel_est_delta:
                touch_text, isTouched, isEnter, isExit = touch_detection(flow_sparse, yy, xx, isTouched, img_mask)
                display_img = draw_flow(cv.cvtColor(img, cv.COLOR_GRAY2BGR),
                                        flow_sparse, yy, xx, touch_text, img_mask)
                cv.imshow('Touch Detection', display_img)

            gpu_ref.upload(img)
            ch = cv.waitKey(5)
            if ch == 27:  # ESC
                break
            if ch == ord('p'):
                cv.waitKey(-1)  # pause
            if ch == ord('z'):
                UI.fill(50)
    print('Done. frame count:', count)


if __name__ == '__main__':
    print(__doc__)
    main()
...
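
The decision rule inside touch_detection above reduces to two statistics over the masked flow field: the mean vector magnitude and the variance of the vector angles. A touch is reported only when both exceed their thresholds, i.e. when the motion is strong and multi-directional (speckle-like) rather than uniform like a hand sweep. A self-contained rerun of that rule on synthetic vectors, using the same thresholds as the snippet:

import numpy as np

touch_threshold = 5        # mean-magnitude gate, as in the snippet
centripetal_threshold = 2  # angle-variance gate that filters uniform hand motion

def cart2pol(z):
    # Convert a 2D flow vector to (magnitude, angle), as in the snippet.
    return np.hypot(z[0], z[1]), np.arctan2(z[1], z[0])

def classify(vectors):
    # Keep only vectors whose magnitude clears the gate, then test
    # mean magnitude and angle variance together.
    polar = [cart2pol(v) for v in vectors]
    polar = [(m, a) for m, a in polar if m > touch_threshold] or [(0.0, 0.0)]
    mags, angles = np.array(polar).T
    touched = mags.mean() > touch_threshold and np.var(angles) > centripetal_threshold
    return "Speckle motion: touched" if touched else "Speckle motion: no touch"

# Strong vectors pointing in many directions -> touched
print(classify([(8, 0), (0, 8), (-8, 0), (0, -8)]))
# Strong but uniform motion (e.g. a hand sweep) -> angle variance too low, no touch
print(classify([(8, 0), (8, 0.5), (7.5, 0)]))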

main.py

Source: main.py (GitHub)

from gwc_python.core import GestureWorksCore
from gwc_python.GWCUtils import TOUCHREMOVED, TOUCHADDED

from direct.gui.OnscreenImage import OnscreenImage
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from pandac.PandaModules import LineSegs
from pandac.PandaModules import deg2Rad
from pandac.PandaModules import NodePath
from pandac.PandaModules import Vec3
from panda3d.core import TransparencyAttrib
from panda3d.core import TextNode

SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080


class MultitouchApp(ShowBase):

    def __init__(self, gw):
        self.gw = gw
        self.active_points = {}
        self.touch_images = {}
        self.touch_text = {}

        ShowBase.__init__(self)
        self.build()
        self.taskMgr.add(self.updateGestureworks, "updateGestureworksTask")

    def processTouchEvents(self, touches):
        for touch in touches:
            if touch.status != TOUCHREMOVED:
                self.active_points.update({touch.point_id: touch})
            else:
                self.active_points.pop(touch.point_id)

    def clearScreen(self):
        for single_image in self.touch_images:
            self.touch_images[single_image].destroy()
        for single_text in self.touch_text:
            self.touch_text[single_text].destroy()
        # Reset the lookups so destroyed nodes are not destroyed again
        # on the next frame.
        self.touch_images = {}
        self.touch_text = {}

    def drawTouchPoints(self):
        for touch in self.active_points.values():
            # We need to convert Gestureworks coordinates to Panda3D coordinates
            touch_x = float((touch.position.x - SCREEN_WIDTH/2) / SCREEN_WIDTH) * 4
            touch_y = float((SCREEN_HEIGHT/2 - touch.position.y) / SCREEN_HEIGHT) * 2
            if touch.status != TOUCHREMOVED:
                # Draw circles
                self.touch_images[touch.point_id] = OnscreenImage(
                    'ring_black.png', pos=(touch_x, 0, touch_y), scale=.05)
                self.touch_images[touch.point_id].setTransparency(TransparencyAttrib.MAlpha)

                # Draw the touchpoint info
                label = 'ID: %d\nX: %d | Y: %d' % (touch.point_id, touch.position.x, touch.position.y)
                self.touch_text[touch.point_id] = OnscreenText(
                    label, pos=(touch_x + 0.1, touch_y), scale=.05, align=TextNode.ALeft)

    def updateGestureworks(self, task):
        self.gw.processFrame()
        point_events = self.gw.consumePointEvents()
        self.processTouchEvents(point_events)
        self.clearScreen()
        self.drawTouchPoints()
        return Task.cont

    def build(self):
        if not self.gw.registerWindow('Panda'):
            print('Unable to register touch window')
            exit()


if __name__ == '__main__':
    gw = GestureWorksCore('C:\\GestureworksCore\\GestureworksCore32.dll')
    if not gw.loaded_dll:
        print('Unable to load GestureWorksCore')
        exit()
    gw.initializeGestureWorks(SCREEN_WIDTH, SCREEN_HEIGHT)
    app = MultitouchApp(gw)
...
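
The one non-obvious step in drawTouchPoints is the coordinate conversion: Gestureworks reports touches in screen pixels with the origin at the top left, while the overlay items are positioned in Panda3D's 2D render space with the origin at the window centre and the y axis pointing up. The snippet spreads x across 4 units and y across 2 units. A standalone check of that mapping:

SCREEN_WIDTH, SCREEN_HEIGHT = 1920, 1080

def to_panda3d(px, py):
    # Same formula as the snippet: centre the pixel coordinates, normalise
    # by the screen size, then scale into the overlay ranges (x spans
    # 4 units, y spans 2 units, with y flipped so up is positive).
    tx = (px - SCREEN_WIDTH / 2) / SCREEN_WIDTH * 4
    ty = (SCREEN_HEIGHT / 2 - py) / SCREEN_HEIGHT * 2
    return tx, ty

print(to_panda3d(0, 0))        # top-left corner  -> (-2.0, 1.0)
print(to_panda3d(960, 540))    # screen centre    -> (0.0, 0.0)
print(to_panda3d(1920, 1080))  # bottom-right     -> (2.0, -1.0)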

config.py

Source: config.py (GitHub)

import os

# config
NUM_BLOCKS = 1
SHUFFLE_BAGS = True

# Parameter for randomizing the number of balloons per pop range (aka bag)
RANDOMIZE_BALLOON_NUM = False
BALLOON_SETUP = [{'range': [0, 8], 'number_of_balloons': 8},
                 {'range': [0, 16], 'number_of_balloons': 8},
                 {'range': [8, 16], 'number_of_balloons': 8}]
PRACTICE_SETUP = [{'range': [10, 10], 'number_of_balloons': 0},
                  {'range': [3, 3], 'number_of_balloons': 0},
                  {'range': [6, 6], 'number_of_balloons': 0}]
NUM_BALLOONS = 18
NUM_BAGS = len(BALLOON_SETUP)
BALLOONS_PER_BAG = NUM_BALLOONS/NUM_BAGS

TOUCH = False
TOUCH_TEXT = ["Touch left side\n", "Touch right side\n"]
TOUCH_INST = ['left side of the screen', 'right side of the screen']

TASK_DIR = "."
INST2_IMG_PATH = os.path.join("inst", "INST2.png")

RESP_KEYS = ['F', 'J']
CONT_KEY = ['SPACEBAR']
CONT_KEY_STR = "Spacebar"
KEY_TEXT = RESP_KEYS

REWARD_LOW = 0.05
REWARD_HIGH = 0.25
# starting value in bank
GRAND_TOTAL = 1.00

RST_WIDTH = 600
FEEDBACK_TIME = 0.75
ISI = 0.25
INTER_PUMP_DURATION = 0.0
REWARD_SLIDE_DURATION = 0.25
PUMP_DURATION = 0.25
COLLECT_DURATION = 0.5
BALLOON_GROWTH_DURATION = 0.2
POP_ANIMATION_DURATION = 1.0
BALLOON_START_SIZE = 100
BALLOON_EXPLODE_SIZE = (500, 500)
FLIP_BART = False
INC_BALLOON_SIZE = 5
TRIAN_SIZE = 10
CROSS_COLOR = (1.0, 1.0, 1.0, 1.0)
CROSS_FONTSIZE = 90
BANK_WIDTH = 150
BANK_HEIGHT = 150
POP_SIZE = (600, 600)
AIR_PUMP_WIDTH = 100
AIR_PUMP_HEIGHT = 100
NOZZLE_WIDTH = 4
NOZZLE_HEIGHT = 40
FEEDBACK_FONT_SIZE = 90
INST_FONT_SIZE = 20
CROSS_FONT_SIZE = 75
FONT_SIZE = 30
LABEL_FONT_SIZE = 30
SKIP_SIZE = [200, 50]
SKIP_FONT_SIZE = 22

# font sizes for labels
TOTAL_FONT_SIZE = 35
TRIAL_FONT_SIZE = 30

FMRI = False
FMRI_TR = ["5"]
FMRI_TECH_KEYS = ['ENTER']
FMRI_TR_DUR = .8
INIT_TR_WAIT = 6.0
POST_TR_WAIT = 16.0
POST_CHECK_TR_DUR = 3.0*FMRI_TR_DUR

TIME_BETWEEN_HAPPY = 15
TIME_JITTER_HAPPY = 10
HAPPY_FONT_SIZE = 25
HAPPY_INC_BASE = .02
HAPPY_INC_START = .2
HAPPY_MOD = 20.
HAPPY_RANGE = 10
NON_PRESS_INT = .1
PRESS_INT = .016
SLIDER_WIDTH = 1000
RESP_HAPPY = ["F", "J"]

EEG = False
...
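
Most of config.py is consumed by task code that is not shown here, but the TOUCH flag hints at the pattern: the same two response slots are described either by the touch strings (TOUCH_TEXT, TOUCH_INST) or by the keyboard keys (RESP_KEYS), depending on the input mode. A hedged sketch of how a task module might pick the prompt strings; the build_prompts helper is illustrative and not part of the original experiment code:

import config

def build_prompts():
    # Illustrative helper (not in the original task code): choose the
    # per-side response prompts based on the input-mode flag.
    if config.TOUCH:
        return list(config.TOUCH_TEXT)  # e.g. "Touch left side\n"
    return ["Press %s\n" % key for key in config.RESP_KEYS]

print(build_prompts())  # with TOUCH = False: ['Press F\n', 'Press J\n']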

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run robotframework-androidlibrary automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
