How to use the print_usage method in localstack

Best Python code snippet using localstack_python
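Note that none of the snippets below call a print_usage API shipped with localstack; each script defines its own small print_usage helper that prints the command-line usage text, optionally reports an error, and exits. As a rough sketch of that shared pattern (the option name, message strings, and argument check here are made up for illustration and are not taken from localstack or from the snippets), a minimal version might look like this:

import sys

def print_usage(err=''):
    # Describe the expected command line, report the error (if any), and exit non-zero.
    print('usage: ' + sys.argv[0] + ' [--blockmin <n>] <trace file> [<trace file> ...]')
    if err:
        print('Error: ' + str(err))
    sys.exit(1)

if __name__ == '__main__':
    # Hypothetical check: require at least one positional argument.
    if len(sys.argv) < 2:
        print_usage('requires at least one trace file')

The three scripts below follow this shape; they differ mainly in which options they document and in whether print_usage itself exits or leaves the sys.exit call to its caller.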

getImportantBlocks.py

Source: getImportantBlocks.py (GitHub)

...

def print_error(err):
    if err != '':
        print 'Error: ' + str(err)
    sys.exit(1)

def print_usage(err=''):
    print "usage : " + sys.argv[0]
    print "[Optional Arguments]:"
    print " --blockmin <block_minimum_to_include> [default = " + str(DEFAULT_BLOCK_MIN) + "]"
    print "[Positional Arguments]:"
    print " (optional) a .jbbinst.static file, followed by..."
    print " a list of .jbbinst trace files"
    print ""
    print "Example: " + sys.argv[0] + " --blockmin 1000 dynTest.r*.jbbinst"
    print_error(err)

def file_exists(filename):
    if os.path.isfile(filename):
        return True
    return False

# classes describing input file and constituent components
class ImageLine:
    def __init__(self, toks):
        if len(toks) != 7:
            print_usage('invalid image tokens: ' + str(toks))
        if toks[0] != IMAGE_IDENTIFIER:
            print_usage('expecting first token in image line as ' + IMAGE_IDENTIFIER)
        self.type = toks[3]
        if self.type != 'Executable' and self.type != 'SharedLib':
            print_usage('valid types for image are "Executable" and "SharedLib": ' + self.type)
        self.name = toks[4]
        try:
            self.hashcode = int(toks[1], 16)
            self.sequence = int(toks[2])
            self.blockcount = int(toks[5])
            self.loopcount = int(toks[6])
        except ValueError:
            print_usage('problem parsing image line; expecting ints for the following: ' + str([toks[1], toks[2], toks[5], toks[6]]))

    def __str__(self):
        return ''

    def id(self):
        return self.name

class CounterLine(object):
    def __init__(self, toks):
        self.threadcounters = {}

    def addthreadcount(self, toks):
        if len(toks) != 2:
            print_usage('invalid thread count tokens: ' + str(toks))

        try:
            tseq = int(toks[0])
            tcnt = int(toks[1])
            if self.threadcounters.has_key(tseq):
                print_usage('duplicate thread count for thread ' + str(tseq) + ' in block ' + str(self.sequence) + ' image ' + str(self.image))
            self.threadcounters[tseq] = tcnt
        except ValueError:
            print_usage('problem parsing thread count line: ' + str(toks))

class BlockLine(CounterLine):
    def __init__(self, toks):
        super(BlockLine, self).__init__(toks)
        if len(toks) != 9:
            print_usage('invalid block tokens: ' + str(toks))
        if toks[0] != BLOCK_IDENTIFIER:
            print_usage('expecting first token in block line as ' + BLOCK_IDENTIFIER)
        if toks[5] != '#':
            print_usage('malformed block line: ' + str(toks))
        self.function = toks[7]
        try:
            self.hashcode = int(toks[2], 16)
            self.sequence = int(toks[1])
            self.image = int(toks[3])
            self.count = int(toks[4])
            self.address = int(toks[8], 16)
        except ValueError:
            print_usage('problem parsing block line; expecting ints for the following: ' + str([toks[1], toks[2], toks[3], toks[4], toks[8]]))
        try:
            t = toks[6].split(':')
            if len(t) != 2:
                raise ValueError
            self.file = t[0]
            self.lineno = int(t[1])
        except ValueError:
            print_usage('problem parsing block line; expecting <file>:<line> instead of the following: ' + str(toks[6]))

    def id(self):
        return str(self.image) + str(self.hashcode)

class LoopLine(CounterLine):
    def __init__(self, toks):
        super(LoopLine, self).__init__(toks)
        if len(toks) != 8:
            print_usage('invalid loop tokens: ' + str(toks))
        if toks[0] != LOOP_IDENTIFIER:
            print_usage('expecting first token in loop line as ' + LOOP_IDENTIFIER)
        if toks[4] != '#':
            print_usage('malformed loop line: ' + str(toks))
        self.function = toks[6]
        try:
            self.hashcode = int(toks[1], 16)
            self.image = int(toks[2])
            self.count = int(toks[3])
            self.address = int(toks[7], 16)
        except ValueError:
            print_usage('problem parsing loop line; expecting ints for the following: ' + str([toks[1], toks[2], toks[3], toks[7]]))
        try:
            t = toks[5].split(':')
            if len(t) != 2:
                raise ValueError
            self.file = t[0]
            self.lineno = int(t[1])
        except ValueError:
            print_usage('problem parsing loop line; expecting <file>:<line> instead of the following: ' + str(toks[5]))

    def id(self):
        return str(self.image) + str(self.hashcode)

class StaticBlockLine:
    def __init__(self, toks, image):
        self.hashcode = toks[1]
        self.memOps = int(toks[2])
        self.fpOps = int(toks[3])
        self.insns = int(toks[4])
        self.image = image

    def id(self):
        return str(self.image) + str(self.hashcode)

class StaticFile:
    @staticmethod
    def isStaticFile(f):
        r = re.compile(INPUT_STATIC_NAME_REGEX)
        p = r.match(f)
        if p == None:
            return False
        return True

    def __init__(self, sfile):
        self.sfile = sfile
        if not file_exists(self.sfile):
            print_usage(str(sfile) + ' is not a valid file')
        if not StaticFile.isStaticFile(sfile):
            print_usage('expecting a specific format for file name (' + INPUT_STATIC_NAME_REGEX + '): ' + sfile)
        print 'Reading static file ' + sfile
        self.image = 0
        self.blocks = {}
        f = open(sfile)
        for line in f:
            toks = line.strip().split()
            if len(toks) == 0:
                continue
#            if toks[1] == "sha1sum":
#                pat = "(................).*"
#                r = re.compile(pat)
#                m = r.match(toks[3])
#                self.image = m.group(1)
            if toks[0].isdigit():
                b = StaticBlockLine(toks, self.image)
                self.blocks[b.id()] = b

class JbbTraceFile:
    def __init__(self, tfile):
        self.tfile = tfile
        if not file_exists(self.tfile):
            print_usage(str(self.tfile) + ' is not a valid file')
        r = re.compile(INPUT_JBB_NAME_REGEX)
        p = r.match(self.tfile)
        if p == None:
            print_usage('expecting a specific format for file name (' + INPUT_JBB_NAME_REGEX + '): ' + self.tfile)
        try:
            self.application = p.group(1)
            self.mpirank = int(p.group(2))
            self.mpitasks = int(p.group(3))
        except ValueError:
            print_usage('expecting a specific format for file name (' + INPUT_JBB_NAME_REGEX + '): ' + self.tfile)
        self.rimage = {}
        self.images = {}
        self.blocks = {}
        self.loops = {}
        f = open(self.tfile)
        for line in f:
            toks = line.strip().split()
            if len(toks) > 0:
                if toks[0] == IMAGE_IDENTIFIER:
                    c = ImageLine(toks)
                    i = c.id()
                    if self.images.has_key(i):
                        print_usage('duplicate image: ' + str(i))
                    self.images[i] = c
                    self.rimage[c.sequence] = i
                elif toks[0] == BLOCK_IDENTIFIER:
                    c = BlockLine(toks)
                    i = c.id()
                    if self.blocks.has_key(i):
                        print_usage('duplicate block: ' + str(i))
                    self.blocks[i] = c
                elif toks[0] == LOOP_IDENTIFIER:
                    c = LoopLine(toks)
                    i = c.id()
                    if not self.loops.has_key(i):
                        self.loops[i] = []
                    self.loops[i].append(c)

    def __str__(self):
        return ''

def main():
    # handle command line
    try:
        optlist, args = getopt.getopt(sys.argv[1:], '', ['blockmin='])
    except getopt.GetoptError, err:
        print_usage(err)
    blockmin = DEFAULT_BLOCK_MIN
    for i in range(0, len(optlist), 1):
        if optlist[i][0] == '--blockmin':
            try:
                blockmin = int(optlist[i][1])
                if (blockmin < 1):
                    raise ValueError
            except ValueError:
                print_usage('argument to --blockmin should be a positive int')
    if len(args) == 0:
        print_usage('requires a list of jbbinst trace files as positional arguments')
    staticFile = args[0]
    if StaticFile.isStaticFile(staticFile):
        staticFile = StaticFile(staticFile)
        args = args[1:]
    else:
        staticFile = None
    if len(args) == 0:
        print_usage('requires a list of jbbinst trace files as positional arguments')
    if staticFile != None:
        outfile = open(OUTPUT_OPCOUNT_NAME, 'w')
        outfile.write("# Rank\ttotInsns\ttotMemops\ttotFpops\n")
    else:
        outfile = None
    # parse input files (all remaining positional args)
    imagelist = {}
    ntasks = 0
    appname = ''
    index = 0
    imagecounts = {}
    total = 0
    blockfiles = {}
    for f in args:
        index += 1
        print 'Processing input file ' + str(index) + ' of ' + str(len(args)) + ': ' + f
        b = JbbTraceFile(f)
        if blockfiles.has_key(b.mpirank):
            print_usage('duplicate mpi rank found in input files: ' + str(b.mpirank))
        blockfiles[b.mpirank] = 1
        if outfile != None:
            blockFile = b
            totInsns = 0
            totMemops = 0
            totFpops = 0
            for block in staticFile.blocks.values():
                try:
                    dynBlock = blockFile.blocks[block.id()]
                except KeyError:
                    continue
                totInsns = totInsns + block.insns * dynBlock.count
                totMemops = totMemops + block.memOps * dynBlock.count
                totFpops = totFpops + block.fpOps * dynBlock.count
            outfile.write(str(blockFile.mpirank) + "\t" + str(totInsns) + "\t" + str(totMemops) + "\t" + str(totFpops) + "\n")

        for ki in b.images.keys():
            imagelist[ki] = 1
        if ntasks == 0:
            ntasks = b.mpitasks
        if ntasks != b.mpitasks:
            print_usage('all files should be from a run with the same number of mpi tasks: ' + str(ntasks))
        if ntasks == 0:
            print_usage('number of mpi tasks should be > 0')
        if appname == '':
            appname = b.application
        if appname != b.application:
            print_usage('all files should be from a run with the same application name: ' + appname)
        # add up block counts from this rank
        for kb in b.blocks.keys():
            bb = b.blocks[kb]
            iid = b.rimage[bb.image]
            if not imagecounts.has_key(iid):
                imagecounts[iid] = {}
            if not imagecounts[iid].has_key(bb.hashcode):
                imagecounts[iid][bb.hashcode] = 0
            imagecounts[iid][bb.hashcode] += bb.count
            total += bb.count
    if outfile != None:
        outfile.close()
    # write to file if block count exceeds minimum
    imagefiles = {}
...


main.py

Source: main.py (GitHub)

import pandas as pd
from WordsTokenizer import WordsTokenizer
from SemanticsClassifier import SemanticsClassifier

def print_usage():
    print("Semantics predictions from coffee grounds! Choose an option:")
    print("1. Set the path to the semantics file")
    print("2. Set the path to the queries file")
    print("3. Set the model parameters. The old model will be deleted")
    print("4. Set the tokenizer parameters. The old tokenizer and the old MODEL will be deleted, but the model "
          "parameters will be kept")
    print("5. Set the output path for the results")
    print("6. Run the predictions")
    print("?. Show this menu again")
    print("q. Quit the program")

def read_file():
    beautiful_print("Enter the path to the file...")
    s = input()
    try:
        df = pd.read_csv(s)
    except:
        print("Could not open the file. Try again? (y/n)")
        s = input().lower()
        if s == 'y':
            return read_file()
        else:
            return None
    beautiful_print('File read successfully! Returning to the main menu...')
    print_usage()
    return df

def print_delimeter():
    print('------------------------------------')

def read_param_model():
    print_delimeter()
    print("Enter the parameters a, n_jobs, be_verbose, p separated by spaces. Their description:")
    print("a -- weight of the cosine distance (the Levenshtein distance gets 1-a)")
    print("n_jobs -- number of threads")
    # FIXME: implement proper handling of be_verbose
    print("be_verbose -- print more information. Not working yet: it always prints a lot")
    print("p -- minimum distance threshold below which a query is treated as 'Unknown'. From 0 to 1")
    print_delimeter()
    params = input().split()
    try:
        a = float(params[0])
        n_jobs = int(params[1])
        be_verbose = bool(params[2])
        p = float(params[3])
    except:
        print("Invalid input. Would you like to try again? (y/n)")
        ans = input().lower()
        if ans == 'y':
            return read_param_model()
        else:
            return None
    return a, n_jobs, be_verbose, p

def read_param_tokenizer():
    print_delimeter()
    print("Enter the parameters p, n_jobs separated by spaces. Their description:")
    print("p -- maximum normalized Levenshtein distance (degree of similarity)"
          " between two words at which they are treated as the same word. From 0 to 1")
    print("n_jobs -- number of threads")
    print_delimeter()
    params = input().split()
    try:
        p = float(params[0])
        n_jobs = int(params[1])
    except:
        print("Invalid input. Would you like to try again? (y/n)")
        ans = input().lower()
        if ans == 'y':
            return read_param_tokenizer()
        else:
            return None
    return p, n_jobs

def beautiful_print(s):
    print_delimeter()
    print(s)
    print_delimeter()

if __name__ == "__main__":
    print_usage()
    tokenizer = WordsTokenizer()
    model = SemanticsClassifier()
    out_path = '../results/result.csv'
    df_sem, df_search = None, None
    while True:
        n = input()
        if n not in ['1', '2', '3', '4', '5', '6', '?', 'q']:
            print("Invalid input, please try again. If you need help, enter '?' without the quotes")
            continue
        if n == '1':
            df_sem = read_file()
            if df_sem is None:
                beautiful_print("The file was not read. Choose another option")
                print_usage()
        if n == '2':
            df_search = read_file()
            if df_search is None:
                beautiful_print("The file was not read. Choose another option")
                print_usage()
        if n == '3':
            par1 = read_param_model()
            model = SemanticsClassifier(a=par1[0], n_jobs=par1[1], be_verbose=par1[2], p=par1[3], tokenizer=tokenizer)
            beautiful_print("Model parameters set. Returning to the main menu...")
            print_usage()
        if n == '4':
            par2 = read_param_tokenizer()
            tokenizer = WordsTokenizer(p=par2[0], n_jobs=par2[1])
            model = SemanticsClassifier(a=par1[0], n_jobs=par1[1], be_verbose=par1[2], p=par1[3], tokenizer=tokenizer)
            beautiful_print("Model parameters set. Returning to the main menu...")
            print_usage()
        if n == '5':
            print("Enter the output file path...")
            out_path = input()
            beautiful_print("Path set. Returning to the main menu...")
            print_usage()
        if n == '6':
            if df_sem is None or df_search is None:
                beautiful_print("Not all files have been specified. Returning to the main menu...")
                print_usage()
                continue
            tokenizer.fit(df_search['search'].values)
            model.train(df_sem['keyword_name'].values)
            predictions = model.predict(df_search['search'].values)
            df_search['predictions'] = predictions
            print_delimeter()
            print("Predictions are ready. Writing to file...")
            df_search.to_csv(out_path)
            print('Done! Returning to the main menu...')
            print_delimeter()
            print()
            print_usage()
        if n == '?':
            print_usage()
        if n == 'q':
            beautiful_print("Thank you for using our services!")
...


readProcessedTrace.py

Source: readProcessedTrace.py (GitHub)

...
# hash_code dyn_fp_cnt dyn_mem_cnt L1hr L2hr [L3hr] func_name line_number static_ft_cnt static_mem_cnt avg_memop_size dynamic_bb_cnt dynamic_bb_perc
import getopt
import string
import sys

def print_usage(err):
    print 'Error: ' + str(err)
    print "usage : " + sys.argv[0]
    print " --application <application>"
    print " --dataset <dataset>"
    print " --cpu_count <cpu count>"
    print " --sysid <sys id>"
    print " --level <level count>"
    print " --cpu <cpu>"
    print " --input_dir <input dir>"

## set up command line args
try:
    optlist, args = getopt.getopt(sys.argv[1:], '', ['application=', 'dataset=', 'cpu_count=', 'sysid=', 'level=', 'cpu=', 'input_dir='])
except getopt.GetoptError, err:
    print_usage(err)
    sys.exit(-1)
if len(args) > 0:
    print_usage('extra arguments are invalid ' + str(args))
    sys.exit(-1)
application = ''
dataset = ''
cpu_count = 0
sysid = 0
level = 0
input_dir = ''
for i in range(0, len(optlist), 1):
    if optlist[i][0] == '--cpu_count':
        try:
            cpu_count = int(optlist[i][1])
        except ValueError, err:
            print_usage(err)
            sys.exit(-1)
    elif optlist[i][0] == '--sysid':
        try:
            sysid = int(optlist[i][1])
        except ValueError, err:
            print_usage(err)
            sys.exit(-1)
    elif optlist[i][0] == '--level':
        try:
            level = int(optlist[i][1])
        except ValueError, err:
            print_usage(err)
            sys.exit(-1)
    elif optlist[i][0] == '--cpu':
        try:
            cpu = int(optlist[i][1])
        except ValueError, err:
            print_usage(err)
            sys.exit(-1)
    elif optlist[i][0] == '--application':
        application = optlist[i][1]
    elif optlist[i][0] == '--dataset':
        dataset = optlist[i][1]
    elif optlist[i][0] == '--input_dir':
        input_dir = optlist[i][1]
    else:
        print_usage('unknown argument ' + str(optlist[i][0]))
        sys.exit(-1)
if application == '':
    print_usage('--application required')
    sys.exit(-1)
if dataset == '':
    print_usage('--dataset required')
    sys.exit(-1)
if cpu_count == 0:
    print_usage('--cpu_count required')
    sys.exit(-1)
if sysid == 0:
    print_usage('--sysid required')
    sys.exit(-1)
if level == 0:
    print_usage('--level required')
    sys.exit(-1)
if level < 2 or level > 3:
    print_usage('argument to --level should be 2 or 3')
    sys.exit(-1)
if input_dir == '':
    print_usage('--input_dir required')
    sys.exit(-1)
trace_filen = '%(input_dir)s/%(application)s_%(dataset)s_%(cpu_count)04d_%(cpu)04d.sysid%(sysid)d' % { 'input_dir':input_dir, 'application':application, 'dataset':dataset, 'cpu_count':cpu_count, 'cpu':cpu, 'sysid':sysid }
tracef = open(trace_filen, 'r')
trace_data = tracef.readlines()
tracef.close()
bb2func_filen = '%(input_dir)s/%(application)s_%(dataset)s_%(cpu_count)04d.bb2func' % { 'input_dir':input_dir, 'application':application, 'dataset':dataset, 'cpu_count':cpu_count }
bb2funcf = open(bb2func_filen, 'r')
bb2func_data = bb2funcf.readlines()
bb2funcf.close()
static_filen = '%(input_dir)s/%(application)s_%(dataset)s_%(cpu_count)04d.static' % { 'input_dir':input_dir, 'application':application, 'dataset':dataset, 'cpu_count':cpu_count }
staticf = open(static_filen, 'r')
static_data = staticf.readlines()
staticf.close()
bbbytes_filen = '%(input_dir)s/%(application)s_%(dataset)s_%(cpu_count)04d.bbbytes' % { 'input_dir':input_dir, 'application':application, 'dataset':dataset, 'cpu_count':cpu_count }
bbbytesf = open(bbbytes_filen, 'r')
bbbytes_data = bbbytesf.readlines()
bbbytesf.close()
# fill a dictionary with the trace data, keyed on bb hashcode
bb_info = {}
for i in range(0, len(trace_data), 1):
    toks = [d.strip() for d in trace_data[i].split()]
    if len(toks) != 3 + level:
        print_usage('invalid number of tokens on line ' + str(i+1) + ' of trace file ' + str(trace_filen))
        sys.exit(-1)
    bb_info[toks[0]] = toks[1:]
# find the bb2func info for all of the blocks that appear in bb_info
for i in range(0, len(bb2func_data), 1):
    toks = [d.strip() for d in bb2func_data[i].split()]
    if len(toks) != 4:
        print_usage('invalid number of tokens on line ' + str(i+1) + ' of bb2func file ' + str(bb2func_filen))
        sys.exit(-1)
    if bb_info.has_key(toks[0]):
        bb_info[toks[0]].append(toks[1])
        bb_info[toks[0]].append(toks[3])
# find the static info for all of the blocks that appear in bb_info
for i in range(0, len(static_data), 1):
    toks = [d.strip() for d in static_data[i].split()]
    if len(toks) != 5:
        print_usage('invalid number of tokens on line ' + str(i+1) + ' of static file ' + str(static_filen))
        sys.exit(-1)
    if bb_info.has_key(toks[0]):
        bb_info[toks[0]].append(toks[1])
        bb_info[toks[0]].append(toks[2])
        bb_info[toks[0]].append(toks[3])
# find the bbbytes info for all of the blocks that appear in bb_info
for i in range(0, len(bbbytes_data), 1):
    toks = [d.strip() for d in bbbytes_data[i].split()]
    if len(toks) != 2:
        print_usage('invalid number of tokens on line ' + str(i+1) + ' of bbbytes file ' + str(bbbytes_filen))
        sys.exit(-1)
    if bb_info.has_key(toks[0]):
        bb_info[toks[0]].append(toks[1])
# determine number of accesses to each block
block_total = 0
for k in bb_info.keys():
    try:
        block_accesses = int(bb_info[k][1]) / int(bb_info[k][6 + level])
        bb_info[k].append(str(block_accesses))
        block_total += block_accesses
    except ValueError, err:
        print_usage(err)
        sys.exit(-1)
# print each combined block info
for k in bb_info.keys():
    block_perc = float(bb_info[k][8 + level]) / float(block_total)
    if block_perc > 0.01:
...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

