How to use the test_instance_list method in uiautomator

Best Python code snippets using uiautomator

loadData.py

Source: loadData.py (GitHub)


from transformers import BertTokenizer, BertTokenizerFast
import torch
from torch.utils.data import Dataset, DataLoader
import logging
import os
from preprocessing.preprocessData import splitDatasetIntoTrainDevTest, preprocessDataAndSave
from preprocessing.utils import loadFromPickleFile
from preprocessing import const


class COVID19TaskDataset(Dataset):
    """Thin Dataset wrapper around a plain list of instances."""

    def __init__(self, instance_list):
        super(COVID19TaskDataset, self).__init__()
        self.instance_list = instance_list

    def __getitem__(self, index):
        return self.instance_list[index]

    def __len__(self):
        return len(self.instance_list)


class TokenizeCollator():
    """Collate function that tokenizes a batch and gathers gold labels per subtask."""

    def __init__(self, tokenizer, subtask_list, entity_start_token_id):
        self.tokenizer = tokenizer
        self.subtask_list = subtask_list
        self.entity_start_token_id = entity_start_token_id

    def __call__(self, batch):
        # Prepare result containers
        gold_label_dict_batch = {subtask: [] for subtask in self.subtask_list}
        input_text_list_batch = []
        tweet_id_batch = []
        token_batch = []
        for input_text, subtask_label_dict, tweet_id, token_text in batch:
            input_text_list_batch.append(input_text)
            tweet_id_batch.append(tweet_id)
            token_batch.append(token_text)
            for subtask in self.subtask_list:
                gold_label_dict_batch[subtask].append(subtask_label_dict[subtask][1])  # 0 is gold chunk
        # Send to BERT's tokenizer
        tokenized_input_text_list_batch = self.tokenizer.batch_encode_plus(
            input_text_list_batch, pad_to_max_length=True, return_tensors='pt')
        input_ids = tokenized_input_text_list_batch['input_ids']
        # Not needed for RobertaModel
        if 'token_type_ids' in tokenized_input_text_list_batch:
            token_type_ids = tokenized_input_text_list_batch['token_type_ids']
        else:
            token_type_ids = None
        attention_mask = tokenized_input_text_list_batch['attention_mask']
        # Positions of the entity start token in every row
        entity_start_positions = torch.nonzero(input_ids == self.entity_start_token_id, as_tuple=False)
        input_label_dict = {
            subtask: torch.LongTensor(gold_label_list)
            for subtask, gold_label_list in gold_label_dict_batch.items()
        }
        if entity_start_positions.size(0) == 0:
            # Fall back to [CLS]'s position, i.e. 0
            entity_start_positions = torch.zeros(input_ids.size(0), 2).long()
        # DEBUG: one label per example for every subtask
        for subtask in self.subtask_list:
            assert input_ids.size(0) == input_label_dict[subtask].size(0)
        return {
            'input_ids': input_ids,
            'entity_start_positions': entity_start_positions,
            'token_type_ids': token_type_ids,
            'gold_labels': input_label_dict,
            'batch_data': batch,
            'tweet_id': tweet_id_batch
        }


def loadData(event,
             entity_start_token_id,
             tokenizer,
             batch_size=8,
             train_ratio=0.6, dev_ratio=0.15,
             shuffle_train_data_flg=True, num_workers=0,
             input_text_processing_func_list=[]):
    """Return DataLoaders for train/dev/test and the subtask_list.
    Input:
        event -- name of event, one of
            ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
        entity_start_token_id -- token id of the entity start marker
        tokenizer
    Keyword Arguments:
        batch_size -- [default 8]
        train_ratio -- [default 0.6]
        dev_ratio -- [default 0.15]
        shuffle_train_data_flg -- whether to shuffle the train DataLoader [default True]
        num_workers -- [default 0]
    """
    # Init Tokenizer
    # entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    # Load data, generating the preprocessed pickle if it is missing
    preprocessed_data_file = os.path.join(const.DATA_FOLDER, f'{event}-preprocessed-data.pkl')
    if not os.path.isfile(preprocessed_data_file):
        # TODO use logging module
        print(f"File {preprocessed_data_file} doesn't exist, generating...")
        preprocessDataAndSave(event)
    subtask_list, raw_input_text_and_label_list = loadFromPickleFile(preprocessed_data_file)
    if input_text_processing_func_list:
        tmp_list = []
        print("Processing Input Text")
        for tweet_text, input_text, subtask_label_dict, tweet_id, token_text in raw_input_text_and_label_list:
            for processing_func in input_text_processing_func_list:
                input_text = processing_func(input_text)
            print(tweet_text, input_text, subtask_label_dict, tweet_id)
            tmp_list.append((tweet_text, input_text, subtask_label_dict, tweet_id, token_text))
        raw_input_text_and_label_list = tmp_list
    (train_instance_list,
     dev_instance_list,
     test_instance_list) = splitDatasetIntoTrainDevTest(
        raw_input_text_and_label_list, train_ratio=train_ratio, dev_ratio=dev_ratio)
    # TODO move to logging
    print(f"Dataset Size Report: {len(train_instance_list)} / "
          f"{len(dev_instance_list)} / {len(test_instance_list)} (train/dev/test)")
    train_dataset = COVID19TaskDataset(train_instance_list)
    dev_dataset = COVID19TaskDataset(dev_instance_list)
    test_dataset = COVID19TaskDataset(test_instance_list)
    collate_fn = TokenizeCollator(tokenizer, subtask_list, entity_start_token_id)
    train_dataloader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=shuffle_train_data_flg,
        num_workers=num_workers, collate_fn=collate_fn)
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    test_dataloader = DataLoader(
        test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    return train_dataloader, dev_dataloader, test_dataloader, subtask_list


def loadNewData(event,
                entity_start_token_id,
                tokenizer,
                batch_size=8,
                train_ratio=0.6, dev_ratio=0.15,
                shuffle_train_data_flg=True, num_workers=0,
                input_text_processing_func_list=[]):
    """Same interface as loadData, but reads from const.NEW_DATA_FOLDER and
    puts every instance into the train split (train_ratio=1, dev_ratio=0)."""
    preprocessed_data_file = os.path.join(const.NEW_DATA_FOLDER, f'{event}-preprocessed-data.pkl')
    if not os.path.isfile(preprocessed_data_file):
        print(f"File {preprocessed_data_file} doesn't exist, generating...")
        preprocessDataAndSave(event)
    subtask_list, raw_input_text_and_label_list = loadFromPickleFile(preprocessed_data_file)
    if input_text_processing_func_list:
        tmp_list = []
        print("Processing Input Text")
        for tweet_text, input_text, subtask_label_dict, tweet_id, token_text in raw_input_text_and_label_list:
            for processing_func in input_text_processing_func_list:
                input_text = processing_func(input_text)
            tmp_list.append((tweet_text, input_text, subtask_label_dict, tweet_id, token_text))
        raw_input_text_and_label_list = tmp_list
    (train_instance_list,
     dev_instance_list,
     test_instance_list) = splitDatasetIntoTrainDevTest(
        raw_input_text_and_label_list, train_ratio=1, dev_ratio=0)
    # TODO move to logging
    print(f"Dataset Size Report: {len(train_instance_list)} / "
          f"{len(dev_instance_list)} / {len(test_instance_list)} (train/dev/test)")
    train_dataset = COVID19TaskDataset(train_instance_list)
    dev_dataset = COVID19TaskDataset(dev_instance_list)
    test_dataset = COVID19TaskDataset(test_instance_list)
    collate_fn = TokenizeCollator(tokenizer, subtask_list, entity_start_token_id)
    train_dataloader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=shuffle_train_data_flg,
        num_workers=num_workers, collate_fn=collate_fn)
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    test_dataloader = DataLoader(
        test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    ...
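
In loadData, test_instance_list is simply the third split returned by splitDatasetIntoTrainDevTest; it is wrapped in a COVID19TaskDataset and served through a DataLoader whose batches are assembled by TokenizeCollator. Below is a minimal driver sketch, assuming the preprocessing package above is importable and that the entity start marker is an added <E> token (as the commented-out hint in the source suggests); the model name and event name are only examples:

from transformers import BertTokenizerFast

# Assumed setup: a BERT tokenizer extended with an <E> entity-start marker.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
tokenizer.add_tokens(["<E>"])
entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]

train_dl, dev_dl, test_dl, subtask_list = loadData(
    "positive", entity_start_token_id, tokenizer, batch_size=8)

for batch in test_dl:
    # Each batch is the dict built by TokenizeCollator.__call__
    print(batch["input_ids"].shape, sorted(batch["gold_labels"]))
    break

Because the collator returns a dict rather than a tuple of tensors, downstream training loops index batches by key instead of by position.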


BenchmarkRunner.py

Source: BenchmarkRunner.py (GitHub)


import subprocess
from tqdm import tqdm
import itertools as it
import os
import multiprocessing as mp


class BenchmarkRunner:
    def __init__(self, log_dir, exec_path="../src/job-shop-experiment/job-shop", repeat_time=1, warmup_seconds=0):
        self.exec_path = exec_path + " "
        self.log_dir = log_dir
        self.repeat_time = repeat_time
        self.available_parameter_dict, self.available_instance_list = self._get_avaialbe_benchmark_options()
        print("# Length of the available instance list is {}".format(len(self.available_instance_list)))
        self.test_parameter_dict = {}
        self.test_instance_list = []
        self.warmup_seconds = warmup_seconds

    def _run_single_command(self, cmd_str):
        # Execute a single command, merging stderr into stdout
        p = subprocess.Popen(cmd_str.split(), stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, err = p.communicate()
        return output.decode('utf-8')

    def run_test(self, log_dir=None):
        if log_dir is None:
            log_dir = self.log_dir
        all_parameter_names = sorted(self.test_parameter_dict)
        parameter_combination_list = list(it.product(
            *(self._add_key_2_every_val(para_name, self.test_parameter_dict[para_name])
              for para_name in all_parameter_names)))
        testing_list = [(list(para) + [instance])
                        for para in parameter_combination_list
                        for instance in self.test_instance_list]
        result_list = []
        if self.warmup_seconds > 0:
            print("Starting warmup for {} seconds".format(self.warmup_seconds))
            self._run_single_command('stress --cpu 1 --timeout ' + str(self.warmup_seconds))
            print("Finished warmup. Now starting the benchmark.")
        for i in range(self.repeat_time):
            for test in tqdm(testing_list):
                cmd = self.exec_path + " ".join(test)
                log_file_name = log_dir + " ".join(test).replace(" ", "_").replace("-", "") + "_run_" + str(i) + ".log"
                if not os.path.isfile(log_file_name):
                    # run a single benchmark
                    result = self._run_single_command(cmd)
                    # write the result to a log file
                    with open(log_file_name, "w") as log_file:
                        log_file.write(cmd)
                        log_file.write('\n')
                        log_file.write(result)
                    result_list.append(result)
        return result_list

    def run_test_parallel(self, log_dir=None, process_count=mp.cpu_count()):
        if process_count > mp.cpu_count():
            print('number of processes should not exceed cpu_count.')
            process_count = mp.cpu_count()
        if log_dir is None:
            log_dir = self.log_dir
        all_parameter_names = sorted(self.test_parameter_dict)
        parameter_combination_list = list(it.product(
            *(self._add_key_2_every_val(para_name, self.test_parameter_dict[para_name])
              for para_name in all_parameter_names)))
        testing_list = [(list(para) + [instance])
                        for para in parameter_combination_list
                        for instance in self.test_instance_list]
        result_list = []
        if self.warmup_seconds > 0:
            print("Starting warmup for {} seconds".format(self.warmup_seconds))
            self._run_single_command('stress --cpu 1 --timeout ' + str(self.warmup_seconds))
            print("Finished warmup. Now starting the benchmark.")
        print("The first element in testing list is {}".format(testing_list[0]))
        with mp.Pool(process_count) as pool:
            result_list = pool.starmap_async(
                self._run_single_JSSP_instance, [(test, 0) for test in testing_list]).get()
        return result_list

    def _run_single_JSSP_instance(self, test, repeat_time):
        cmd = self.exec_path + " ".join(test)
        log_file_name = self.log_dir + " ".join(test).replace(" ", "_").replace("-", "") + "_run_" + str(repeat_time) + ".log"
        if not os.path.isfile(log_file_name):
            # run a single benchmark
            result = self._run_single_command(cmd)
            # write the result to a log file
            with open(log_file_name, "w") as log_file:
                log_file.write(cmd)
                log_file.write('\n')
                log_file.write(result)
            return result

    def add_testing_instances(self, instance_list):
        # (an earlier variant filtered instances against self.available_instance_list)
        print("Start add_testing_instances")
        for instance in instance_list:
            self.test_instance_list.append(instance)

    def _add_key_2_every_val(self, key, val_list):
        return [(key + " " + v) for v in val_list]

    def add_parameter_options(self, para_dict):
        # add values for benchmarks for one parameter
        assert len(para_dict) == 1, 'Please add one parameter at a time'
        key = list(para_dict.keys())[0]
        assert key in self.available_parameter_dict, 'Parameter {} is not available'.format(key)
        val = para_dict[key]
        self.test_parameter_dict[key] = val

    def get_current_test_instances(self):
        return self.test_instance_list

    def get_current_test_parameters(self):
        return self.test_parameter_dict

    def get_available_instance_list(self):
        return self.available_instance_list

    def _get_avaialbe_benchmark_options(self, exec_path=None, help_cmd=' --help'):
        # parse the program's --help output for available options
        if exec_path is None:
            exec_path = self.exec_path
        help_str = self._run_single_command(exec_path + help_cmd)
        help_list = help_str.replace('\t', ' ').replace('\n', ' ').split(' ')
        # get parameter options
        parameter_list = [x for x in help_list if x.startswith('-') and len(x) > 1][3:]
        parameter_dict = {}
        for option in parameter_list:
            parameter_dict[option] = []
        # get JSSP instance options
        instance_list = "".join(help_list[help_list.index('instances:') + 1:]).split(',')[:-1]
        ...
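
Here test_instance_list holds the names of the JSSP instances to benchmark: add_testing_instances appends to it, and run_test / run_test_parallel cross it with every parameter combination, writing one log file per run. A usage sketch follows; the parameter flag and instance names are hypothetical and must match whatever the job-shop executable actually advertises in its --help output:

# Hypothetical driver: "--time-limit", "ft06" and "la01" are placeholders
# for options and instances reported by the real job-shop binary.
runner = BenchmarkRunner(log_dir="logs/", repeat_time=1, warmup_seconds=0)
runner.add_parameter_options({"--time-limit": ["10", "60"]})
runner.add_testing_instances(["ft06", "la01"])

print(runner.get_current_test_instances())   # ['ft06', 'la01']
print(runner.get_current_test_parameters())  # {'--time-limit': ['10', '60']}

# 2 parameter values x 2 instances = 4 benchmark runs, one log file each
results = runner.run_test()

Note that add_parameter_options asserts the flag is present in the options parsed from --help, so unknown flags fail fast before any benchmark runs.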


main.py

Source: main.py (GitHub)


# -*- coding: utf-8 -*-
from TransitionParsing import TranSys, leftArc, rightArc, shift
from maxent import MaxEnt
from evaluator import test_classifier


def load_sentence(fname):
    """Read a CoNLL file into a list of sentences, where every token is a
    (word_index, token, POS, head_index, label) tuple."""
    f = open(fname, 'r')
    raw_data = []
    sentence = []
    for line in f:
        if line.strip():
            line = line.split()
            word_index = line[0]
            token = line[1]
            POS = line[3]
            head_index = line[6]
            label = line[7]
            element = tuple((int(word_index), token, POS, int(head_index), label))
            sentence.append(element)
        else:
            # blank line: the current sentence is complete
            raw_data.append(sentence)
            sentence = []
    return raw_data

# --------------------------
sentence_instances = load_sentence("data/wsj.00.01.22.24.conll")
test_sentence_instances = load_sentence("data/wsj.23.conll")
transition_codebook = {'LeftArc': leftArc,
                       'RightArc': rightArc,
                       'Shift': shift}
tranSys = TranSys(transition_codebook)
instance_list = []
test_instance_list = []
for sentence, test_sentence in zip(sentence_instances, test_sentence_instances):
    CT_pairs = tranSys.Gold_parse(sentence)
    instances = tranSys.feature_extract(CT_pairs)
    instance_list += instances
    # for testing the classifier
    test_CT_pairs = tranSys.Gold_parse(test_sentence)
    test_instances = tranSys.feature_extract(test_CT_pairs)
    test_instance_list += test_instances

# ----------------------------------
# To train and test nltk's NaiveBayesClassifier on the same featuresets instead,
# parts of TransitionParsing and evaluator must be adapted, since the nltk
# classifier and the self-defined MaxEnt classifier expose different methods:
# import nltk
# featuresets = [(instance.data, instance.label) for instance in instance_list]
# testsets = [(instance.data, instance.label) for instance in test_instance_list]
# train_set, test_set = featuresets[:], testsets[:]
# classifier = nltk.NaiveBayesClassifier.train(train_set)
# print(nltk.classify.accuracy(classifier, test_set))
# CM = test_classifier(classifier, test_set)
# CM.print_out()
# ----------------------------------
# MaxEnt training (uncomment to retrain and save the classifier):
# ME = MaxEnt()
# ME.train(instance_list)
# ME.save("dependency_parsing_classifier.json")
# ----------------------------------
# Test the parser: load the trained MaxEnt classifier, decode every test
# sentence, and write the parses to parser.conll
ME = MaxEnt.load("dependency_parsing_classifier.json")
CM = test_classifier(ME, test_instance_list)
CM.print_out()
tranSys = TranSys(transition_codebook)
wfile = open('parser.conll', 'w')
for test_sentence in test_sentence_instances:
    new_sentence = tranSys.decode_parser(ME, test_sentence)
    for element in new_sentence:
        if element[0] != 0:  # skip the artificial root token
            wfile.write(str(element[0]) + '\t' + str(element[1]) + '\t' + '_' + '\t'
                        + str(element[2]) + '\t' + str(element[2]) + '\t' + '_' + '\t'
                        + str(element[3]) + '\t' + str(element[4]) + '\t' + '_' + '\t' + '_')
            wfile.write("\n")
    wfile.write("\r\n")
...
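
In this script, test_instance_list collects the feature-extracted transition instances of the held-out sentences and is what test_classifier evaluates. The feeding function, load_sentence, reads only columns 0, 1, 3, 6 and 7 of each CoNLL row (word index, token, POS, head index, dependency label) and closes a sentence at every blank line. A small, self-contained smoke test of just that parsing step, using a hypothetical two-token sentence in the 10-column layout the script expects:

# Hypothetical check of load_sentence alone; the file path is made up.
sample = (
    "1\tMs.\t_\tNNP\tNNP\t_\t2\tTITLE\t_\t_\n"
    "2\tHaag\t_\tNNP\tNNP\t_\t0\tROOT\t_\t_\n"
    "\n"  # blank line terminates the sentence
)
with open("sample.conll", "w") as f:
    f.write(sample)

print(load_sentence("sample.conll"))
# [[(1, 'Ms.', 'NNP', 2, 'TITLE'), (2, 'Haag', 'NNP', 0, 'ROOT')]]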


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run uiautomator automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!
