How to use test_gesture method in uiautomator

Best Python code snippets using uiautomator
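Note that `test_gesture` is not a method of the `uiautomator` library itself: in the snippets below it is a local variable or parameter naming the gesture under test. If you are looking for uiautomator's own gesture support, the Python `uiautomator` package exposes a two-point `gesture(...).to(...)` call on selectors. A minimal sketch follows; the selector and all coordinates are placeholders, not values from the snippets:

```python
# Minimal sketch of a two-point gesture with the Python `uiautomator`
# package; the selector and every coordinate below are placeholders.
from uiautomator import Device

d = Device()  # connects to the first device reported by adb

# Drag two touch points from the start positions to the end positions,
# e.g. to pinch or rotate the selected view; `steps` controls the speed.
d(className="android.widget.ImageView") \
    .gesture((200, 400), (400, 400)) \
    .to((300, 300), (300, 500), steps=100)
```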

model.py

Source: model.py (GitHub)



```python
import csv
import time
import matplotlib.pyplot as plt
import features
import knn
import sys

FNAME_IN_PREFIX = "../data/2022-03-22"
FNAME_OUT_PREFIX = "model/2022-03-22"
#TNUM = 5  # trials per participant; must be defined before calling get_feature_sets()
#GESTURES = ["LP", "TA", "TLFO", "TIFO", "TLFE", "TIFE", "TIFE", "LFE", "IFE", "TE", "WF", "WE", "FS", "FP", "HO", "HC", "HR"]
GESTURES = ["HO", "HR", "HC", "TIFE"]
DIM_NUM = 6

feature_sets = []

def load_feature_sets_from_file():
    fsets = []
    try:
        for g in GESTURES:
            fsets.append(list(csv.reader(open("{}/model_{}_1.csv".format(FNAME_OUT_PREFIX, g.lower())))))
    except Exception as e:
        print("Error loading {}: {}".format(g, e))
        return None
    return fsets

def get_feature_sets(force_remodel=False):
    print("Loading feature sets")

    if not force_remodel:
        fsets = load_feature_sets_from_file()
        if fsets:
            print("Loaded feature sets from file successfully")
            return fsets
    fsets = []
    for g in GESTURES:
        pnum = 1  # participant
        print("Modelling {}...".format(g))
        f = []
        for tnum in range(TNUM):
            f += features.get_features(features.import_data("{}/p{}_{}_t{}.csv".format(FNAME_IN_PREFIX, pnum, g.lower(), tnum + 1)), g)
        features.write_features_to_csv("{}/model_{}.csv".format(FNAME_OUT_PREFIX, g.lower()), f)
        fsets.append(f)
    return load_feature_sets_from_file()

def load_test_points(test_gesture):
    return list(csv.reader(open("{}/model_{}_1.csv".format(FNAME_OUT_PREFIX, test_gesture.lower()))))

# 9s to model everything and write to csv
# response time should be under 300ms
# TODO FIX THIS
#feature_sets = get_feature_sets(force_remodel=False)

def calc_certainty(label, kn):
    return round([p[0] for p in kn].count(label) / len(kn), 2)

feature_sets = []
print("Generating model...")
for g in GESTURES:
    # Generate features
    f = features.get_features(features.import_data("{}/{}.csv".format(FNAME_IN_PREFIX, g.lower())), g.lower(), DIM_NUM)

    # Save model
    features.write_features_to_csv("{}/model_{}_{}.csv".format(FNAME_OUT_PREFIX, g.lower(), 1), f)
    feature_sets.append(f)

# TODO: maybe implement weighting
K = 3
bar = False
realtime = True
WINDOW_LEN_MS = int(256 / 1260 * 1000)

knn_data = []
for i in range(len(GESTURES)):
    knn.add_training_data(knn_data, feature_sets[i], GESTURES[i])

#for test_gesture in GESTURES:
test_gesture = "test2"
test_points = load_test_points(test_gesture)
start = time.perf_counter()
classification_results = []
classification_bests = [("TIFE", 1)]
classification_filtered = ["TIFE"]
DELAY_MS = 2400
#for g in GESTURES:
#    classification_results[g] = 0
test_len = 109

if realtime:
    input("Press any key to start real-time classification...")

for i in range(test_len):
    ps = time.perf_counter()
    k_nearest = knn.get_k_nearest(knn_data, test_points[i], K)
    label = knn.calc_mode(k_nearest)
    classification_results.append(k_nearest)
    certainty = calc_certainty(label, k_nearest)
    if realtime:
        time.sleep((WINDOW_LEN_MS - (time.perf_counter() - ps)) / 1000)

    # filter
    cur_label = classification_filtered[-1]
    accepted = False
    if i >= 2 and certainty >= 1:
        # TIFE is rarely misclassified
        if label == "TIFE":
            accepted = True
        # As a gesture is mostly identified during the initial transition into it,
        # identified periods of rest are normal throughout the gesture and can be ignored
        elif label == "HR":
            accepted = False
        # For other gestures, ensure that the previous best matches
        elif label != classification_filtered[-1]:
            if label == classification_bests[-1][0] and classification_bests[-1][1] >= 0.6:
                accepted = True
            else:
                accepted = False
    else:
        accepted = False
    if accepted:
        ms = (i + 1) * WINDOW_LEN_MS
        print("[{}]: {}".format(ms, label))
        classification_filtered.append(label)
    else:
        classification_filtered.append(cur_label)
    classification_bests.append((label, certainty))
    #print("{} ({}%)".format(label, int(certainty * 100)))
    sys.stdout.flush()

end = time.perf_counter()
total_num = len(test_points)
duration_ms = (end - start) * 1000
print("{}ms of input data processed".format(WINDOW_LEN_MS * len(classification_results)))
print("Duration: {}ms for {} test points, {}ms per point".format(duration_ms, len(test_points), duration_ms / len(test_points)))
#print("Accuracy {} (K={}) {}%".format(test_gesture, K, int(classification_results[test_gesture] * 100 / total_num)))

imgname = ""
plt.title("First layer Classifier with K-NN (K={})".format(K))
plt.xticks(fontsize=7)
if bar:
    plt.bar(classification_results.keys(), [x / total_num for x in classification_results.values()])
    imgname = "img/{}_k{}_all.png"
else:
    total_len = WINDOW_LEN_MS * len(test_points)
    mapping = {5: {0.2: 1, 0.4: 4, 0.6: 32, 0.8: 64, 1: 128},
               3: {0.33: 1, 0.67: 32, 1: 128}}
    plt.scatter([x * WINDOW_LEN_MS for x in range(len(classification_bests))],
                [x[0] for x in classification_bests],
                s=[mapping[K][x[1]] for x in classification_bests])
    imgname = "img/{}_k{}_bests.png"
plt.savefig("out/first.png")
plt.show()
plt.clf()

plt.title("Second layer Classifier")
plt.xticks(fontsize=7)
plt.scatter([x * WINDOW_LEN_MS for x in range(len(classification_filtered))], classification_filtered)
plt.savefig("out/second.png")
plt.show()

# certainty
plt.clf()
plt.title("Certainty")
plt.xticks(fontsize=7)
plt.scatter([x * WINDOW_LEN_MS for x in range(len(classification_bests))], [x[1] for x in classification_bests])
plt.savefig("out/certainty.png")
```
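The `features` and `knn` imports above are project-local modules that are not shown on this page. For orientation only, here is a hypothetical stand-in for the three `knn` calls, inferred purely from their call sites; the real module may well differ:

```python
# Hypothetical stand-in for the project-local `knn` module used above,
# inferred from its call sites (add_training_data / get_k_nearest / calc_mode).
import math
from collections import Counter

def add_training_data(knn_data, feature_set, label):
    # Store each feature vector as a (label, vector) pair.
    for row in feature_set:
        knn_data.append((label, [float(x) for x in row]))

def get_k_nearest(knn_data, point, k):
    # Euclidean distance from `point` to every training vector; keep the k closest.
    p = [float(x) for x in point]
    return sorted(knn_data, key=lambda entry: math.dist(entry[1], p))[:k]

def calc_mode(k_nearest):
    # Most common label among the k nearest neighbours.
    return Counter(label for label, _ in k_nearest).most_common(1)[0][0]
```

This matches the way `calc_certainty` reads each neighbour's label from `p[0]`.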


main.py

Source: main.py (GitHub)



```python
import csv
import fnmatch
import os
import cv2
import numpy as np
import tensorflow as tf
import handshape_feature_extractor
import frameextractor

# global variables
BASE = os.path.dirname(os.path.abspath(__file__))
IMAGE_MODE = cv2.IMREAD_GRAYSCALE
TRAININGDATA = os.path.join(BASE, 'traindata')
TESTINGDATA = os.path.join(BASE, 'test')
TRAINOUT = os.path.join(BASE, 'trainout')
TESTOUT = os.path.join(BASE, 'testout')

# CNN model
hfe = handshape_feature_extractor.HandShapeFeatureExtractor().get_instance()

table = {
    'Num0': 0, 'num0': 0,
    'Num1': 1, 'num1': 1,
    'Num2': 2, 'num2': 2,
    'Num3': 3, 'num3': 3,
    'Num4': 4, 'num4': 4,
    'Num5': 5, 'num5': 5,
    'Num6': 6, 'num6': 6,
    'Num7': 7, 'num7': 7,
    'Num8': 8, 'num8': 8,
    'Num9': 9, 'num9': 9,
    'FanDown': 10, 'decreasefanspeed': 10, 'FanSpeedDown': 10,
    'FanOff': 11, 'fanoff': 11,
    'FanOn': 12, 'fanon': 12,
    'FanUp': 13, 'increasefanspeed': 13, 'FanSpeedUp': 13,
    'LightOff': 14, 'lightoff': 14,
    'LightOn': 15, 'lighton': 15,
    'SetThermo': 16, 'setthermo': 16, 'Sethermostat': 16
}

class Gesture:
    """Holds the data pertaining to a gesture, such as the name, video file name, image file name, etc."""

    def __init__(self, name=None, video_file=None, image_file=None, feature_score=None,
                 true_label=float('inf'), predicted_label=float('inf')):
        """
        : name            - gesture name
        : video_file      - video file name
        : image_file      - image file name
        : feature_score   - extract_feature() score
        : true_label      - true label for training data
        : predicted_label - predicted label after cosine similarity
        """
        self.name = name
        self.image_file = image_file
        self.feature_score = feature_score
        self.video_file = video_file
        self.true_label = true_label
        self.predicted_label = predicted_label
        if self.video_file:
            self.video_name = self.video_file.replace('_SaiMadhuriMolleti', '').replace('.mp4', '')

    def is_identified(self):
        return self.predicted_label == self.true_label

def features_test():
    """Extracts the features from each test-data video provided."""
    test_gesture = dict()
    for test_count, test_file in enumerate(fnmatch.filter(os.listdir(TESTINGDATA), '*.mp4')):
        te_gesture = feature_gesture(TESTINGDATA, TESTOUT, test_file, test_count)
        test_gesture[te_gesture.video_name] = te_gesture
    return test_gesture

def features_train():
    """Extracts the features from each training-data video provided."""
    train_gesture = dict()
    for train_count, train_file in enumerate(fnmatch.filter(os.listdir(TRAININGDATA), '*.mp4')):
        tr_gesture = feature_gesture(TRAININGDATA, TRAINOUT, train_file, train_count)
        train_gesture[tr_gesture.video_name] = tr_gesture
    return train_gesture

def get_gesture_name(file_path):
    name = file_path
    if '-' in file_path:
        name = '-'.join(file_path.split('-')[1:])
    elif '_' in file_path:
        name = file_path.split('_')[0]
    return name.replace(".mp4", "")

def feature_gesture(data_dir, out_dir, video_file, count):
    """Extract a frame to a file and calculate its feature vector."""
    video_path = os.path.join(data_dir, video_file)
    frameextractor.frameExtractor(video_path, out_dir, count)
    image_path = os.path.join(out_dir, '{0:05}.png'.format(count + 1))
    gesture_name = get_gesture_name(video_file)
    image_file = os.path.basename(image_path)
    image_array = cv2.imread(image_path, IMAGE_MODE)
    vector = hfe.extract_feature(image_array)
    try:
        # get the known true label for train data
        true_label = table[gesture_name]
    except KeyError:
        # guess the expected true label for test data
        true_label = count % 17
    gesture = Gesture(gesture_name, video_file, image_file,
                      vector, true_label)
    return gesture

def print_result(test_gesture, results_matrix):
    correct = np.sum([1 for x in test_gesture if test_gesture[x].is_identified()])
    total = results_matrix.sum()
    percentage = correct / total * 100
    print("Matched {0} out of {1} => {2:0.2f} %".format(correct, total, percentage))
    with open(os.path.join(BASE, 'Results.csv'), mode='w') as results_csv:
        results_writer = csv.writer(results_csv, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for i in sorted(test_gesture, key=lambda x: test_gesture[x].video_name):
            results_writer.writerow([test_gesture[i].predicted_label])

def main():
    test_gesture = features_test()
    train_gesture = features_train()
    print("Test Data", "Prediction", "True", "Match")
    # calculate cosine similarity
    results_matrix = np.zeros((17, 17), dtype=int)
    for k in test_gesture:
        test_proba = test_gesture[k].feature_score
        cosine_similarity = float('inf')
        tr_video_file = None
        for key in train_gesture:
            trained = train_gesture[key].feature_score
            cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
            y = cosine_loss(trained, test_proba).numpy()
            if y < cosine_similarity:
                cosine_similarity = y
                test_gesture[k].predicted_label = train_gesture[key].true_label
                tr_video_file = train_gesture[key].video_name
        results_matrix[test_gesture[k].predicted_label][test_gesture[k].true_label] += 1
        print(test_gesture[k].video_name, test_gesture[k].predicted_label,
              test_gesture[k].true_label, test_gesture[k].is_identified())
    print_result(test_gesture, results_matrix)

if __name__ == '__main__':
    main()
```
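One detail worth calling out: `tf.keras.losses.CosineSimilarity` is a *loss*, so it returns roughly -1 for vectors pointing the same way and +1 for opposite ones, which is why `main()` keeps the training gesture with the minimum value. A self-contained check with made-up vectors:

```python
# tf.keras.losses.CosineSimilarity returns negative cosine similarity,
# so smaller values mean more similar vectors. The vectors are made up.
import tensorflow as tf

cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
a = tf.constant([[1.0, 2.0, 3.0]])
b = tf.constant([[1.0, 2.0, 3.0]])     # identical direction
c = tf.constant([[-1.0, -2.0, -3.0]])  # opposite direction

print(cosine_loss(a, b).numpy())  # -1.0 (most similar => smallest value)
print(cosine_loss(a, c).numpy())  #  1.0 (least similar => largest value)
```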


HMM.py

Source: HMM.py (GitHub)



```python
# By Mohit Minhas
# NOTE: this script uses Python 2 syntax (print statements, xrange).
import math
import numpy
#from sklearn.hmm import MultinomialHMM
#from hmmn import *
from hmmpy import *
from sklearn.cluster import k_means
#from scipy.cluster.vq import kmeans2

def get_xyz_data(path, name):
    xfl = path + '\\' + name + '_x.csv'
    xx = numpy.genfromtxt(xfl, delimiter=',')
    yfl = path + '\\' + name + '_y.csv'
    xy = numpy.genfromtxt(yfl, delimiter=',')
    zfl = path + '\\' + name + '_z.csv'
    xz = numpy.genfromtxt(zfl, delimiter=',')
    x = []
    x.append(xx)
    x.append(xy)
    x.append(xz)
    x = numpy.array(x)
    return x

"""
def emprob(M, N):
    a = 1/float(N)
    E = numpy.zeros((M, N))
    for i in xrange(M):
        for j in xrange(N):
            E[i][j] = a
    return E
"""

def prior_transition_matrix(K, LR):
    P = numpy.multiply(1/float(LR), numpy.identity(K+1))
    w = 1/float(LR)
    for i in xrange(1, K-(LR-1)+1):
        for j in xrange(1, LR-1+1):
            P[i][i+j] = w
    for i in xrange(K-(LR-2), K+1):
        for j in xrange(1, K-i+1+1):
            P[i][i+(j-1)] = 1/float(K-i+1)
    P = P[1:, 1:]
    return P

def get_point_centroids(indata, K, D):
    mean = numpy.zeros((indata.shape[1], D))
    for n in xrange(0, indata.shape[1]):
        for i in xrange(0, indata.shape[2]):
            for j in xrange(0, D):
                mean[n][j] = mean[n][j] + indata[j][n][i]
        mean[n] = mean[n]/(indata.shape[2])
    (centroids, x, y) = k_means(mean, K)  # random order; change n_jobs to speed up
    return centroids

def get_point_clusters(data, centroids, D):
    XClustered = [[] for x in xrange(data.shape[2])]
    K = centroids.shape[0]
    for n in xrange(0, data.shape[1]):
        for i in xrange(0, data.shape[2]):
            temp = numpy.zeros((K, 1))
            for j in xrange(0, K):
                #if (D==3)
                temp[j] = math.sqrt(math.pow((centroids[j][0] - data[0][n][i]), 2)
                                    + math.pow((centroids[j][1] - data[1][n][i]), 2)
                                    + math.pow((centroids[j][2] - data[2][n][i]), 2))
            I = numpy.argmin(temp)
            XClustered[i].append(I)
    XClustered = numpy.array(XClustered)
    return XClustered

def pr_hmm(o, a, b, pi):
    # forward-algorithm log likelihood (1-based indexing, ported from MATLAB)
    n = len(a[0])
    T = len(o)
    m = numpy.zeros((T+1, n+1))  # missing in the original; pr_hmm would raise NameError
    for i in xrange(1, n+1):
        m[1][i] = b[i][o[1]]*pi[i]
    for t in xrange(1, (T-1)+1):
        for j in xrange(1, n+1):
            z = 0
            for i in xrange(1, n+1):
                z = z + a[i][j]*m[t][i]
            m[t+1][j] = z*b[j][o[t+1]]
    p = 0
    for i in xrange(1, n+1):
        p = p + m[T][i]
    p = math.log(p)
    return p

D = 3
M = 12
N = 8
LR = 2
train_gesture = 'x'
test_gesture = 'x'
gestureRecThreshold = 0

training = get_xyz_data('data/train', train_gesture)
testing = get_xyz_data('data/test', test_gesture)
centroids = get_point_centroids(training, N, D)
ATrainBinned = get_point_clusters(training, centroids, D)
ATestBinned = get_point_clusters(testing, centroids, D)
pP = prior_transition_matrix(M, LR)
#W = emprob(M, N)
#print ATrainBinned
#model = MultinomialHMM(n_components=M, startprob_prior=pP, n_iter=50)
#model.n_symbols = N
#print model.n_symbols
#model.fit(ATrainBinned)
#model = MultinomialHMM(ATrainBinned, pP, [1:N]', M, cyc, .00001)  # ENTER
#logprob = model.score(ATestBinned)
#print logprob

hmm = HMM(n_states=M, V=[0, 1, 2, 3, 4, 5, 6, 7], A=pP)
print 'TRAINING'
print
baum_welch(hmm, ATrainBinned, graph=False, verbose=True)
print
print 'TESTING'
print
b = forward(hmm, ATestBinned[0])
print b
#model = DiscreteHmm(numstates=M, numclasses=N)
#model.learn(ATrainBinned, numsteps=ATrainBinned.shape[0])

"""
sumLik = 0.0
minLik = float('inf')
for j in xrange(0, len(ATrainBinned)):
    lik = pr_hmm(ATrainBinned[j], P, E.T, Pi)
    if (lik < minLik):
        minLik = lik
    sumLik = sumLik + lik

gestureRecThreshold = 2.0*sumLik/len(ATrainBinned)

print '\n\n********************************************************************\n'
print 'Testing {0} sequences for a log likelihood greater than {1}\n'.format(len(ATestBinned), gestureRecThreshold)
print '********************************************************************\n\n'
recs = 0
tLL = numpy.zeros((len(ATestBinned), 1))
for j in xrange(1, len(ATestBinned)):
    tLL[j][0] = pr_hmm(ATestBinned[j], P, E.T, Pi)
    if (tLL[j][0] > gestureRecThreshold):
        recs = recs + 1
        print 'Log likelihood: {0} > {1} (threshold) -- FOUND {2} GESTURE!\n'.format(tLL[j][0], gestureRecThreshold, test_gesture)
    else:
        print 'Log likelihood: {0} < {1} (threshold) -- NO {2} GESTURE.\n'.format(tLL[j][0], gestureRecThreshold, test_gesture)

print 'Recognition success rate: {0} percent\n'.format(100*recs/len(ATestBinned))
"""
```
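`prior_transition_matrix` builds a left-right prior: each state keeps probability 1/LR of staying put and of advancing to each of the next LR-1 states, and the trailing rows are renormalised so every row sums to 1. A Python 3 re-check of the same arithmetic (only `xrange` swapped for `range`):

```python
# Python 3 re-check of prior_transition_matrix's left-right prior;
# same arithmetic as the snippet above, with range instead of xrange.
import numpy

def prior_transition_matrix(K, LR):
    P = (1.0 / LR) * numpy.identity(K + 1)
    w = 1.0 / LR
    for i in range(1, K - (LR - 1) + 1):
        for j in range(1, LR):
            P[i][i + j] = w
    for i in range(K - (LR - 2), K + 1):
        for j in range(1, K - i + 2):
            P[i][i + (j - 1)] = 1.0 / (K - i + 1)
    return P[1:, 1:]

print(prior_transition_matrix(4, 2))
# [[0.5 0.5 0.  0. ]
#  [0.  0.5 0.5 0. ]
#  [0.  0.  0.5 0.5]
#  [0.  0.  0.  1. ]]
```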


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, through best practices, to advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run uiautomator automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 automation testing minutes free!

Next-Gen App & Browser Testing Cloud
