How to use the filter_line method in localstack

Best Python code snippet using localstack_python

generate_ablation_series.py

Source: generate_ablation_series.py (GitHub)


from pathlib import Path
from csv import reader as csv_reader
from csv import writer as csv_writer
import random

import numpy as np

source_file = "C:\\Users\\Andrei\\Dropbox\\workspaces\\JHU\\Ewald Lab\\" \
              "Kp_Km data\\humanized_weighted_abs_log-fold.txt"
background_file = "C:\\Users\\Andrei\\Dropbox\\workspaces\\JHU\\Ewald Lab\\" \
                  "Kp_Km data\\humanized_genes_background.txt"

source_path = Path(source_file)
fname = source_path.stem
fname = fname + '_ablations'
storage_folder = Path(source_path.parent.joinpath(fname))
storage_folder.mkdir(parents=True, exist_ok=True)


def dump_experiment(experiment_name, corrected_lines, write_outs=[], dump_out=False):
    # The mutable default `write_outs` is used deliberately as a persistent
    # accumulator of every experiment name written so far.
    if dump_out:
        print(write_outs)
        return
    write_outs.append(experiment_name)
    updated_experiment_name = experiment_name + '.tsv'
    print('debug', storage_folder)
    print('debug2', updated_experiment_name)
    with open(Path(storage_folder).joinpath(updated_experiment_name), 'wt',
              newline='', encoding='utf-8') as destination:
        writer = csv_writer(destination, delimiter='\t')
        writer.writerows(corrected_lines)


# Read the source rows (duplets) and the background gene list.
lines = []
with open(source_file, 'rt') as source:
    reader = csv_reader(source, delimiter='\t')
    for line in reader:
        lines.append(line)

background_lines = []
with open(background_file, 'rt') as source:
    reader = csv_reader(source, delimiter='\t')
    for line in reader:
        background_lines.append(line[0])

for removal_value in [0.05, 0.1, 0.2, 0.5]:
    # padding filter vector: True keeps a row, False marks it for ablation
    filter_line = [True] * len(lines)
    start_point = int(len(filter_line) * removal_value)
    filter_line = np.array(filter_line)
    filter_line[-start_point:] = False
    filter_line = filter_line.tolist()

    corrected_lines = [duplet for _filtered, duplet in zip(filter_line, lines) if _filtered]
    dump_experiment('lowest_%d_percent_removed' % (removal_value * 100), corrected_lines)

    # randrange(len(...)) avoids the IndexError that randint(0, len(...)) could raise
    corrected_lines = [duplet
                       if _filtered
                       else [background_lines[random.randrange(len(background_lines))], duplet[1]]
                       for _filtered, duplet
                       in zip(filter_line, lines)]
    dump_experiment('lowest_%d_percent_set_to_random' % (removal_value * 100), corrected_lines)

    random.shuffle(filter_line)
    corrected_lines = [duplet for _filtered, duplet in zip(filter_line, lines) if _filtered]
    dump_experiment('random_%d_percent_removed' % (removal_value * 100), corrected_lines)

    random.shuffle(filter_line)
    corrected_lines = [duplet
                       if _filtered
                       else [background_lines[random.randrange(len(background_lines))], duplet[1]]
                       for _filtered, duplet
                       in zip(filter_line, lines)]
    dump_experiment('random_%d_percent_set_to_random' % (removal_value * 100), corrected_lines)

# Weight-free variant: keep only the first element of each duplet.
flat_line = [(duplet[0],) for duplet in lines]
dump_experiment('no_weights', flat_line)

for removal_value in [0.05, 0.1, 0.2, 0.5]:
    # padding filter vector
    filter_line = [True] * len(lines)
    start_point = int(len(filter_line) * removal_value)
    filter_line = np.array(filter_line)
    filter_line[-start_point:] = False
    filter_line = filter_line.tolist()

    corrected_lines = [(duplet[0],) for _filtered, duplet in zip(filter_line, lines) if _filtered]
    dump_experiment('no_weights_lowest_%d_percent_removed' % (removal_value * 100), corrected_lines)

    corrected_lines = [(duplet[0],)
                       if _filtered
                       else (background_lines[random.randrange(len(background_lines))],)
                       for _filtered, duplet
                       in zip(filter_line, lines)]
    dump_experiment('no_weights_lowest_%d_percent_set_to_random' % (removal_value * 100), corrected_lines)

    random.shuffle(filter_line)
    corrected_lines = [(duplet[0],) for _filtered, duplet in zip(filter_line, lines) if _filtered]
    dump_experiment('no_weights_random_%d_percent_removed' % (removal_value * 100), corrected_lines)

    random.shuffle(filter_line)
    corrected_lines = [(duplet[0],)
                       if _filtered
                       else (background_lines[random.randrange(len(background_lines))],)
                       for _filtered, duplet
                       in zip(filter_line, lines)]
    dump_experiment('no_weights_random_%d_percent_set_to_random' % (removal_value * 100), corrected_lines)

dump_experiment('', [], dump_out=True)
...
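The core pattern in generate_ablation_series.py is a boolean mask named filter_line that is zipped against the data rows to either drop or substitute the masked entries. Below is a minimal, self-contained sketch of that pattern; the gene names, weights, and background list are invented placeholders rather than the script's real input files.

import random

# Hypothetical sample data standing in for the TSV rows read in the script above:
# each row is a (name, weight) duplet; all values are made up for illustration.
lines = [("GENE_A", "2.5"), ("GENE_B", "1.9"), ("GENE_C", "0.7"), ("GENE_D", "0.1")]
background_lines = ["BG_1", "BG_2", "BG_3"]

removal_value = 0.5  # ablate the lowest 50% of rows

# Build the boolean mask: True keeps a row, False marks it for ablation.
filter_line = [True] * len(lines)
start_point = int(len(filter_line) * removal_value)
if start_point:
    filter_line[-start_point:] = [False] * start_point

# Variant 1: drop the masked rows entirely.
kept = [row for keep, row in zip(filter_line, lines) if keep]

# Variant 2: swap the masked names for a random background entry, keeping the weight
# (random.randrange avoids the off-by-one of randint(0, len(...))).
replaced = [row if keep
            else (background_lines[random.randrange(len(background_lines))], row[1])
            for keep, row in zip(filter_line, lines)]

print(kept)      # [('GENE_A', '2.5'), ('GENE_B', '1.9')]
print(replaced)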


log.py

Source: log.py (GitHub)


import re
import json
import ssl
import urllib.parse  # Python 3 home of urlencode (urllib.urlencode in Python 2)


def format_line(line):
    json_line = {
        'deviceName': '',
        'processId': 0,
        'processName': '',
        'description': '',
        'timeWindow': '',
        'numberOfOccurrence': 0
    }
    # month day HH:MM:SS host process[pid] (subprocess[pid])?: message
    pattern = r'\w+ \d+ (\d+):\d+:\d+ (\w+) ([\.|\w+]+)\[(\d+)\]( \(([\.|\w]+)\[(\d+)\]\))?: (.*(\n?.*)*)'
    filter_line = re.match(pattern, line)
    if filter_line:
        json_line["deviceName"] = filter_line.group(2)
        json_line['timeWindow'] = filter_line.group(1)
        json_line['description'] = filter_line.group(8)
        if filter_line.group(6) is not None:
            json_line['processName'] = filter_line.group(6)
        else:
            json_line['processName'] = filter_line.group(3)
        if filter_line.group(7) is not None:
            json_line['processId'] = filter_line.group(7)
        else:
            json_line['processId'] = filter_line.group(4)
    return json_line


def post_json(records):
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    for i in records:
        params = urllib.parse.urlencode(i)
        # f = urllib.urlopen("https://foo.com/bar", params, context=ctx)
        # print(f.read())
        print(json.dumps(i))


def main(startword, file_path):
    file = open(file_path, 'r')
    new_line = ''
    line_list = []
    try:
        while True:
            text_line = file.readline()
            # check multiline
            if text_line.startswith(startword):
                if new_line:  # skip the empty buffer before the first record
                    line_list.append(format_line(new_line))
                new_line = text_line
            elif text_line and not text_line.startswith(startword):
                new_line = new_line + text_line
            else:
                line_list.append(format_line(new_line))
                break
    finally:
        file.close()

    # remove duplicate lines; each record also matches itself, so
    # numberOfOccurrence ends up as the total count of matching lines
    tmp_list = line_list
    for i in tmp_list:
        for j in line_list:
            if (i["timeWindow"] == j["timeWindow"]
                    and i["processId"] == j["processId"]
                    and i["description"] == j["description"]):
                i["numberOfOccurrence"] = i["numberOfOccurrence"] + 1
    line_list = []
    for i in tmp_list:
        if i not in line_list:
            line_list.append(i)
    post_json(line_list)


if __name__ == "__main__":
    ...
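In log.py, filter_line is the re.match result that format_line uses to pull fields out of a syslog-style line. The following usage sketch is hedged: it assumes the snippet above is saved as log.py on the import path, and the sample line is invented to fit the regex's groups (timestamp, host, process[pid], optional subprocess[pid], message).

# Assumes the log.py shown above is importable; the sample line is made up.
from log import format_line

sample = ("Mar 14 10:23:45 MacBook com.apple.xpc.launchd[1] "
          "(com.example.agent[345]): Service exited abnormally")

record = format_line(sample)
print(record["deviceName"])    # MacBook
print(record["timeWindow"])    # 10  (only the hour is captured by group 1)
print(record["processName"])   # com.example.agent  (group 6, the parenthesised process)
print(record["processId"])     # 345
print(record["description"])   # Service exited abnormally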


xls_models_tools.py

Source: xls_models_tools.py (GitHub)


from collections import defaultdict


def filter_line(txt, escape='\t'):
    # split a raw line on the delimiter (tab by default)
    return txt.split(escape)


def remove_undesired(txt, escapes=('', '\t', '\n')):
    # drop empty / whitespace-only cells left over from splitting
    return [x for x in txt if x not in escapes]


def corruption_level(txt, escape='.'):
    # "noise.3" -> ("noise", "3"); "clean" -> ("clean", None)
    entire = txt.split(escape)
    return (entire[-2], entire[-1]) if len(entire) > 1 else (entire[-1], None)


def extract_results(filepath, reveal_error=False):
    with open(filepath, 'r') as infile:
        data = infile.readlines()
    models = remove_undesired(filter_line(data[0]))
    results = {model: defaultdict(dict) for model in models}
    for i in data[1:]:
        values = filter_line(i)
        corruption, level = corruption_level(values[0])
        for model, val in zip(models, values[1:]):
            res = 1 - float(val) if reveal_error else float(val)
            if level is None:
                results[model][corruption] = res
            else:
                results[model][corruption][level] = res
    return results


def extract_results_by_corruption(filepath, reveal_error=False):
    results = defaultdict(dict)
    with open(filepath, 'r') as infile:
        data = infile.readlines()
    models = remove_undesired(filter_line(data[0]))
    for i in data[1:]:
        values = filter_line(i)
        corruption, level = corruption_level(values[0])
        for model, val in zip(models, values[1:]):
            res = 1 - float(val) if reveal_error else float(val)
            if level is None:
                results[corruption][model] = res
            elif level not in results[corruption]:
                results[corruption][level] = {model: res}
            else:
                results[corruption][level][model] = res
    return results


def mean_dict(dict_values):
    ...
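A quick usage sketch of the helpers in xls_models_tools.py, assuming the module above is saved under that name and importable; the row string below is invented. filter_line splits a tab-separated line, remove_undesired drops the leftover empty or newline cells, and corruption_level splits the "<corruption>.<level>" label in the first column.

# Assumes xls_models_tools.py from the snippet above is importable.
from xls_models_tools import filter_line, remove_undesired, corruption_level

row = "gaussian_noise.3\t0.72\t0.68\t\n"   # invented result row: "<corruption>.<level>" then scores

cells = filter_line(row)           # ['gaussian_noise.3', '0.72', '0.68', '\n']
print(remove_undesired(cells))     # ['gaussian_noise.3', '0.72', '0.68']
print(corruption_level(cells[0]))  # ('gaussian_noise', '3')
print(corruption_level("clean"))   # ('clean', None)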


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
