How to use the should_be_kept method in localstack

Best Python code snippet using localstack_python
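The three snippets below are indexed for localstack_python, but in each of them should_be_kept is an ordinary Python variable: a boolean flag (or, in the NumPy case, a boolean mask) that decides which items survive a filtering pass. It is not a LocalStack API call. Here is a minimal, self-contained sketch of the pattern; all names in it (filter_items, required_attrs) are illustrative and do not come from LocalStack or from the projects shown below.

# Minimal sketch of the "should_be_kept" flag pattern (illustrative names only).
def filter_items(items, required_attrs):
    kept = []
    for item in items:
        should_be_kept = True
        for attr, value in required_attrs.items():
            if item.get(attr) != value:
                # one mismatch is enough to drop the item
                should_be_kept = False
                break
        if should_be_kept:
            kept.append(item)
    return kept

print(filter_items(
    [{"optimizer": "sgd"}, {"optimizer": "adam"}],
    {"optimizer": "sgd"},
))  # -> [{'optimizer': 'sgd'}]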

plot_configurations.py

Source: plot_configurations.py (GitHub)


import pickle
import numpy as np

LOGS_PATH = "./all_logs/toate_logs_2022_02_06-23_13_10.pkl"
with open(LOGS_PATH, mode="rb") as pickle_file:
    logs = pickle.load(pickle_file)

def get_configs_with_attr_value(logs, attr_vals):
    query_result = {
        "query": f"{attr_vals}",
        "configs": [],
    }
    for config_name in logs:
        config = logs[config_name]["configuration"]
        should_be_kept = True
        for attr, val in attr_vals:
            if not (attr in config and config[attr] == val):
                should_be_kept = False
                break
        if should_be_kept is True:
            query_result["configs"].append(config_name)
    return query_result

def get_lines(logs, line_names, line_style, xs_dict_keys, ys_dict_keys):
    lines = []
    for line_name in line_names:
        line = {
            "line_name": line_name,
            "line_style": line_style,
            "xs_dict_keys": [line_name] + xs_dict_keys,
            "ys_dict_keys": [line_name] + ys_dict_keys,
        }
        lines.append(line)
    return lines

def get_particles_space_plot_config(
    logs, search_space_max, num_particles, xs_dict_keys, ys_dict_keys, x_label, y_label
):
    target_configs = get_configs_with_attr_value(
        logs, [("search_space_max", search_space_max), ("num_particles", num_particles)]
    )["configs"]
    return {
        "type": "graph",
        "name": f"Models with particles {num_particles} and search_space_max {search_space_max} {x_label} vs {y_label}",
        "x_label": x_label,
        "y_label": y_label,
        "lines": (
            get_lines(
                logs, line_names=target_configs, line_style="solid",
                xs_dict_keys=xs_dict_keys, ys_dict_keys=ys_dict_keys
            )
        )
    }

def get_particles_space_plot_configs(
    logs, search_space_max_options, num_particles_options, xs_dict_keys, ys_dict_keys, x_label, y_label
):
    plot_configs = []
    for search_space_max in search_space_max_options:
        for num_particles in num_particles_options:
            plot_config = get_particles_space_plot_config(
                logs, search_space_max=search_space_max, num_particles=num_particles,
                xs_dict_keys=xs_dict_keys, ys_dict_keys=ys_dict_keys,
                x_label=x_label, y_label=y_label
            )
            plot_configs.append(plot_config)
    return plot_configs

def sort_models_by_metric(logs, metric_key):  # logs_ys_eval_accuracy_epochs
    candidates = []
    for model_name in logs:
        candidate_best_metric = np.max(logs[model_name][metric_key])
        candidate_best_metric_epoch = np.argmax(logs[model_name][metric_key]) + 1
        candidates.append((candidate_best_metric, candidate_best_metric_epoch, model_name))
    candidates.sort(reverse=True)
    return candidates

print(f"=========================================")
models_sorted_by_acc_particles_10 = []
models_sorted_by_acc_particles_100 = []
models_sorted_by_acc_particles_1000 = []
print(f"Models sorted by best test accuracy:")
print(f"Optimizer\t& Num particles\t& $V_{{max}}$\t& $SearchSpace_{{min}}$\t& $SearchSpace_{{max}}$\t& $\\varphi_1$\t& $\\varphi_2$\t& Test acc\t\\\\")
print(f"\\midrule")
models_sorted_by_acc = sort_models_by_metric(logs, "logs_ys_eval_accuracy_epochs")
for i_model, (acc, best_epoch, model_name) in enumerate(models_sorted_by_acc):
    # print(f"[{i_model + 1}]\t{acc}@e{best_epoch}\t{model_name}")  # Non-Latex
    config = logs[model_name]["configuration"]
    optimizer = config["optimizer"]
    num_particles = config.get("num_particles", "-")
    v_max = config.get("v_max", "-")
    search_space_min = config.get("search_space_min", "-")
    search_space_max = config.get("search_space_max", "-")
    cognitive_coeff = config.get("cognitive_coeff", "-")
    social_coeff = config.get("social_coeff", "-")
    print(f"{optimizer}\t& {num_particles}\t& {v_max}\t& {search_space_min}\t& {search_space_max}\t& {cognitive_coeff}\t& {social_coeff}\t& {acc*100:.2f}\\%\t\\\\")
    if "num_particles" not in config:
        continue
    if config["num_particles"] == 10:
        models_sorted_by_acc_particles_10.append((acc, best_epoch, model_name))
    elif config["num_particles"] == 100:
        models_sorted_by_acc_particles_100.append((acc, best_epoch, model_name))
    elif config["num_particles"] == 1000:
        models_sorted_by_acc_particles_1000.append((acc, best_epoch, model_name))
print(f"\\bottomrule")
print(f"=========================================")
print()

print(f"=========================================")
print(f"10 Particles Models sorted by best test accuracy:")
print(f"Optimizer\t& Num particles\t& $V_{{max}}$\t& $SearchSpace_{{min}}$\t& $SearchSpace_{{max}}$\t& $\\varphi_1$\t& $\\varphi_2$\t& Test acc\t\\\\")
print(f"\\midrule")
for i_model, (acc, best_epoch, model_name) in enumerate(models_sorted_by_acc_particles_10):
    # print(f"[{i_model + 1}]\t{acc}@e{best_epoch}\t{model_name}")
    config = logs[model_name]["configuration"]
    optimizer = config["optimizer"]
    num_particles = config.get("num_particles", "-")
    v_max = config.get("v_max", "-")
    search_space_min = config.get("search_space_min", "-")
    search_space_max = config.get("search_space_max", "-")
    cognitive_coeff = config.get("cognitive_coeff", "-")
    social_coeff = config.get("social_coeff", "-")
    print(f"{optimizer}\t& {num_particles}\t& {v_max}\t& {search_space_min}\t& {search_space_max}\t& {cognitive_coeff}\t& {social_coeff}\t& {acc*100:.2f}\\%\t\\\\")
print(f"\\bottomrule")
print(f"=========================================")
print()

print(f"=========================================")
print(f"100 Particles Models sorted by best test accuracy:")
print(f"Optimizer\t& Num particles\t& $V_{{max}}$\t& $SearchSpace_{{min}}$\t& $SearchSpace_{{max}}$\t& $\\varphi_1$\t& $\\varphi_2$\t& Test acc\t\\\\")
print(f"\\midrule")
for i_model, (acc, best_epoch, model_name) in enumerate(models_sorted_by_acc_particles_100):
    # print(f"[{i_model + 1}]\t{acc}@e{best_epoch}\t{model_name}")
    config = logs[model_name]["configuration"]
    optimizer = config["optimizer"]
    num_particles = config.get("num_particles", "-")
    v_max = config.get("v_max", "-")
    search_space_min = config.get("search_space_min", "-")
    search_space_max = config.get("search_space_max", "-")
    cognitive_coeff = config.get("cognitive_coeff", "-")
    social_coeff = config.get("social_coeff", "-")
    print(f"{optimizer}\t& {num_particles}\t& {v_max}\t& {search_space_min}\t& {search_space_max}\t& {cognitive_coeff}\t& {social_coeff}\t& {acc*100:.2f}\\%\t\\\\")
print(f"\\bottomrule")
print(f"=========================================")
print()

print(f"=========================================")
print(f"1000 Particles Models sorted by best test accuracy:")
print(f"Optimizer\t& Num particles\t& $V_{{max}}$\t& $SearchSpace_{{min}}$\t& $SearchSpace_{{max}}$\t& $\\varphi_1$\t& $\\varphi_2$\t& Test acc\t\\\\")
print(f"\\midrule")
for i_model, (acc, best_epoch, model_name) in enumerate(models_sorted_by_acc_particles_1000):
    # print(f"[{i_model + 1}]\t{acc}@e{best_epoch}\t{model_name}")
    config = logs[model_name]["configuration"]
    optimizer = config["optimizer"]
    num_particles = config.get("num_particles", "-")
    v_max = config.get("v_max", "-")
    search_space_min = config.get("search_space_min", "-")
    search_space_max = config.get("search_space_max", "-")
    cognitive_coeff = config.get("cognitive_coeff", "-")
    social_coeff = config.get("social_coeff", "-")
    print(f"{optimizer}\t& {num_particles}\t& {v_max}\t& {search_space_min}\t& {search_space_max}\t& {cognitive_coeff}\t& {social_coeff}\t& {acc*100:.2f}\\%\t\\\\")
print(f"\\bottomrule")
print(f"=========================================")
print()

models_sorted_by_acc_top3 = (
    models_sorted_by_acc[:2] +
    models_sorted_by_acc_particles_1000[:3] +
    models_sorted_by_acc_particles_100[:3] +
    models_sorted_by_acc_particles_10[:3]
)
print(f"=========================================")
print(f"Top 3 Models for all number of particles sorted by best test accuracy:")
print(f"Optimizer\t& Num particles\t& $V_{{max}}$\t& $SearchSpace_{{min}}$\t& $SearchSpace_{{max}}$\t& $\\varphi_1$\t& $\\varphi_2$\t& Test acc\t\\\\")
print(f"\\midrule")
for i_model, (acc, best_epoch, model_name) in enumerate(models_sorted_by_acc_top3):
    # print(f"[{i_model + 1}]\t{acc}@e{best_epoch}\t{model_name}")
    config = logs[model_name]["configuration"]
    optimizer = config["optimizer"]
    num_particles = config.get("num_particles", "-")
    v_max = config.get("v_max", "-")
    search_space_min = config.get("search_space_min", "-")
    search_space_max = config.get("search_space_max", "-")
    cognitive_coeff = config.get("cognitive_coeff", "-")
    social_coeff = config.get("social_coeff", "-")
    print(f"{optimizer}\t& {num_particles}\t& {v_max}\t& {search_space_min}\t& {search_space_max}\t& {cognitive_coeff}\t& {social_coeff}\t& {acc*100:.2f}\\%\t\\\\")
print(f"\\bottomrule")
print(f"=========================================")
print()

SEARCH_SPACE_MAX_OPTIONS = [0.21, 0.1, 0.005]
NUM_PARTICLES_OPTIONS = [10, 100, 1000]
CONFIGS_GRADIENT = (
    get_configs_with_attr_value(logs, [("optimizer", "sgd")])["configs"]
    + get_configs_with_attr_value(logs, [("optimizer", "adam")])["configs"]
)
CONFIGS_PARTICLES10 = get_configs_with_attr_value(logs, [("num_particles", 10)])["configs"]
CONFIGS_PARTICLES100 = get_configs_with_attr_value(logs, [("num_particles", 100)])["configs"]
CONFIGS_PARTICLES1000 = get_configs_with_attr_value(logs, [("num_particles", 1000)])["configs"]
CONFIGS_PARTICLES10_TOP10 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_10[:10]]
CONFIGS_PARTICLES100_TOP10 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_100[:10]]
CONFIGS_PARTICLES1000_TOP10 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_1000[:10]]
CONFIGS_PARTICLES10_TOP3 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_10[:3]]
CONFIGS_PARTICLES100_TOP3 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_100[:3]]
CONFIGS_PARTICLES1000_TOP3 = [model_name for (acc, best_epoch, model_name) in models_sorted_by_acc_particles_1000[:3]]
CONFIGS_ALL = CONFIGS_GRADIENT + CONFIGS_PARTICLES10 + CONFIGS_PARTICLES100 + CONFIGS_PARTICLES1000

PLOT_CONFIGS_ALL = [
    {
        "type": "graph",
        "name": "All models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES10, line_style="dotted", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES100, line_style="dashed", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES1000, line_style="dashdot", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    }
]
PLOT_CONFIGS_PARTICLES10 = [
    {
        "type": "graph",
        "name": "All models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_PARTICLES10, line_style="dotted", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    }
]
PLOT_CONFIGS_PARTICLES_SEARCH_SPACE = get_particles_space_plot_configs(
    logs,
    SEARCH_SPACE_MAX_OPTIONS,
    NUM_PARTICLES_OPTIONS,
    xs_dict_keys=["logs_xs_epochs"],
    ys_dict_keys=["logs_ys_eval_accuracy_epochs"],
    x_label="Train epochs",
    y_label="Test accuracy"
)
PLOT_CONFIGS_PARTICLES_TOP10 = [
    {
        "type": "graph",
        "name": "Top 10 10Particles models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES10_TOP10, line_style="dotted", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    },
    {
        "type": "graph",
        "name": "Top 10 100Particles models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES100_TOP10, line_style="dashed", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    },
    {
        "type": "graph",
        "name": "Top 10 1000Particles models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES1000_TOP10, line_style="dashdot", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    }
]
PLOT_CONFIGS_PARTICLES_TOP3 = [
    {
        "type": "graph",
        "name": "Top 3 from each num Particles models Epoch vs Test accuracy",
        "x_label": "Num train epochs",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES10_TOP10[:3], line_style="dotted", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES100_TOP10[:3], line_style="dashed", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES1000_TOP10[:3], line_style="dashdot", xs_dict_keys=["logs_xs_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    },
    {
        "type": "graph",
        "name": "Top 3 from each num Particles models Training duration vs Test accuracy",
        "x_label": "Training duration (s)",
        "y_label": "Test accuracy",
        "lines": (
            get_lines(logs, line_names=CONFIGS_GRADIENT, line_style="solid", xs_dict_keys=["logs_ys_durations_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES10_TOP10[:3], line_style="dotted", xs_dict_keys=["logs_xs_durations_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES100_TOP10[:3], line_style="dashed", xs_dict_keys=["logs_xs_durations_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"]) +
            get_lines(logs, line_names=CONFIGS_PARTICLES1000_TOP10[:3], line_style="dashdot", xs_dict_keys=["logs_xs_durations_epochs"], ys_dict_keys=["logs_ys_eval_accuracy_epochs"])
        )
    }
...


spot-the-difference.py

Source: spot-the-difference.py (GitHub)


import json
import os
import argparse
import copy

NEWLINE = "\n"

def BFS(top_dict, looking_for_key):
    # queue
    dict_q = [top_dict]  # list of dictionaries
    # magic math
    for d in dict_q:
        dict_keys = d.keys()
        for k in dict_keys:
            if k == looking_for_key:
                return d[k]
            if isinstance(d[k], dict):
                dict_q.append(d[k])
    return ""

def find_key_with_value(key, value, element_dictionary):
    return BFS(element_dictionary, key) == value

def get_ignore_list_file_path():
    return os.path.join(os.getcwd(), "config", "ignore.json")

def get_acm_leaks_list_file_path():
    return os.path.join(os.getcwd(), "config", "acm-leaks.json")

def read_json_file(file_path):
    if file_path:
        with open(file_path) as f:
            return json.load(f)

def read_scan_file_as_dict(file_path):
    _resource_info_dict = dict()
    _details_file = file_path
    _read_file = open(_details_file, 'r')
    while True:
        # _resource_type=_read_file.readline().strip()
        # if not _resource_type:
        #     break
        _resource_details_json = _read_file.readline().strip()
        if not _resource_details_json:
            break
        _resource_details_dict = json.loads(_resource_details_json)
        if _resource_details_dict:
            _resource_info_dict.update(_resource_details_dict)
        # _resource_info_dict[_resource_type]=_resource_details_dict
    _read_file.close()
    return _resource_info_dict

def simplify_dict_list(dict_of_lists):
    _attributes_we_care_about = ["kind", "name", "namespace", "pod-template-hash"]
    _dict_keys = dict_of_lists.keys()
    _simplified_dict_list = dict()
    for k in _dict_keys:
        _simplified_dict_list[k] = []
        _resource_list = dict_of_lists[k]
        for r in _resource_list:  # r is a dict for a particular resource
            _simplified_resource = dict()
            for a in _attributes_we_care_about:
                _simplified_resource[a] = BFS(r, a)
            _simplified_dict_list[k].append(_simplified_resource)
    return _simplified_dict_list

def fix_name(dict_of_lists):
    _short_name_dict = dict()
    _dict_keys = dict_of_lists.keys()
    for k in _dict_keys:
        _short_name_dict[k] = []
        _resource_list = dict_of_lists[k]
        for r in _resource_list:  # r is a dict for a particular resource
            _short_name_resource = dict()
            _short_name_resource["kind"] = r["kind"]
            if r["pod-template-hash"] != "":
                _short_name_resource["name"] = r["name"].split(r["pod-template-hash"])[0]
            else:
                _short_name_resource["name"] = r["name"]
            _short_name_resource["namespace"] = r["namespace"]
            _short_name_dict[k].append(_short_name_resource)
    return _short_name_dict

def diffTheLists(list_one, list_two):
    results = {
        "both": [],
        "added": [],
        "removed": []
    }
    # gotta check both directions
    for n in list_one:
        if n in list_two:
            results["both"].append(n)
        else:
            results["removed"].append(n)
    for n in list_two:
        if n not in list_one:
            results["added"].append(n)
    return results

def spotTheDifference(json_dict_one, json_dict_two):
    results = {
        "both": dict(),
        "added": dict(),
        "removed": dict()
    }
    _resource_list_one = json_dict_one.keys()
    _resource_list_two = json_dict_two.keys()
    _resource_diffs = diffTheLists(_resource_list_one, _resource_list_two)
    for a in _resource_diffs["added"]:
        if len(json_dict_two[a]) > 0:
            results["added"][a] = json_dict_two[a]
    for r in _resource_diffs["removed"]:
        results["removed"][r] = json_dict_one[r]
    for b in _resource_diffs["both"]:
        _row_diffs = diffTheLists(json_dict_one[b], json_dict_two[b])
        for a in _row_diffs["added"]:
            if b not in results["added"]:
                results["added"][b] = []
            results["added"][b].append(a)
        for r in _row_diffs["removed"]:
            if b not in results["removed"]:
                results["removed"][b] = []
            results["removed"][b].append(r)
        for bo in _row_diffs["both"]:
            if b not in results["both"]:
                results["both"][b] = []
            results["both"][b].append(bo)
    return results

# this returns a (sub) set of the list_of_resources, having removed remove_resource
def usefulFunctNoGoodName(list_of_resources, remove_resource):
    # list_of_resources_copy = copy.deepcopy(list_of_resources)
    list_of_resources_subset = []
    for r in list_of_resources:
        remove_resource_keys = remove_resource.keys()
        should_be_kept = False
        for key in remove_resource_keys:
            if not find_key_with_value(key, remove_resource[key], r):
                should_be_kept = True
                break
        if should_be_kept:
            list_of_resources_subset.append(r)
    return list_of_resources_subset

def removeIgnoredItems(res_dict, ignore_list):
    if len(ignore_list) == 0:
        return res_dict
    ignore_list_copy = copy.deepcopy(ignore_list)
    for remove_resource in ignore_list_copy:
        if "kind" in remove_resource.keys():  # if kind specified
            resource_kind = remove_resource["kind"]
            if resource_kind in res_dict.keys() and len(remove_resource.keys()) == 1:  # if the entire kind should be removed (and that kind exists)
                res_dict.pop(resource_kind, None)
                continue
            # else, don't remove _entire_ kind
            remove_resource.pop("kind", None)  # don't check kind anymore
            if resource_kind in res_dict.keys() and res_dict[resource_kind]:  # if kind even exists in file
                res_dict[resource_kind] = usefulFunctNoGoodName(res_dict[resource_kind], remove_resource)
        else:  # kind not specified, gotta loop through each kind
            for resource_kind in res_dict.keys():
                if res_dict[resource_kind]:
                    res_dict[resource_kind] = usefulFunctNoGoodName(res_dict[resource_kind], remove_resource)
    return res_dict

def removeEmptyResults(res_dict):
    copy_dict = copy.deepcopy(res_dict)
    dict_keys = copy_dict.keys()
    for k in dict_keys:
        if type(copy_dict[k]) is list and not copy_dict[k]:  # empty list
            del res_dict[k]
    return res_dict

def writeJSON(file_path, res_dict):
    _file = open(file_path, "w")
    _file.write(json.dumps(res_dict, indent=2))
    _file.close()

def countTotals(res_dict):
    typeCount = 0
    totalCount = 0
    for x in res_dict:
        typeCount += 1
        totalCount += len(res_dict[x])
    return typeCount, totalCount

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--first_file", required=True)
    parser.add_argument("-s", "--second_file", required=True)
    parser.add_argument("-o", "--output_tag", required=True)
    args = vars(parser.parse_args())
    _first_file_path = args['first_file']
    _second_file_path = args['second_file']
    _output_tag = args['output_tag']
    _first_resource_dict_list = read_scan_file_as_dict(_first_file_path)
    _second_resource_dict_list = read_scan_file_as_dict(_second_file_path)
    _first_resource_dict_list_simplified = simplify_dict_list(_first_resource_dict_list)
    _second_resource_dict_list_simplified = simplify_dict_list(_second_resource_dict_list)
    _first_resource_dict_list_namefixed = fix_name(_first_resource_dict_list_simplified)
    _second_resource_dict_list_namefixed = fix_name(_second_resource_dict_list_simplified)

    _results = spotTheDifference(_first_resource_dict_list_namefixed, _second_resource_dict_list_namefixed)
    _res_both = _results["both"]
    _res_added = _results["added"]
    _res_removed = _results["removed"]
    _ignore_list = read_json_file(get_ignore_list_file_path())
    _acm_leaks_list = read_json_file(get_acm_leaks_list_file_path())
    _all_ignores_list = _ignore_list + _acm_leaks_list
    _res_both = removeIgnoredItems(_res_both, _all_ignores_list)
    _res_added = removeIgnoredItems(_res_added, _all_ignores_list)
    _res_removed = removeIgnoredItems(_res_removed, _all_ignores_list)
    _res_both = removeEmptyResults(_results["both"])
    _res_added = removeEmptyResults(_results["added"])
    _res_removed = removeEmptyResults(_results["removed"])
    typeCount, totalCount = countTotals(_res_added)
    with open('./results/count.txt', 'w') as f:
        f.write('Total Leak Count: ' + str(totalCount))
        f.write('\n')
        f.write('Leaked Resource Kinds: ' + str(typeCount))
        f.write('\n')
    # writeJSON("./results/both-results-"+_output_tag+".json", _res_both)
    # writeJSON("./results/removed-results-"+_output_tag+".json", _res_removed)
    writeJSON("./results/leaks-" + _output_tag + ".json", _res_added)
...
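In spot-the-difference.py the logic is inverted: should_be_kept starts out False and flips to True as soon as the resource fails to match one key/value of the ignore entry, so a resource is dropped only when the ignore entry matches it completely. A small illustrative run follows, assuming usefulFunctNoGoodName (and its helpers BFS and find_key_with_value) from the snippet above are in scope; the resource dicts are simplified stand-ins for the Kubernetes-style records the script reads from its scan files.

resources = [
    {"kind": "Secret", "name": "db-creds", "namespace": "default"},
    {"kind": "Secret", "name": "tls-cert", "namespace": "kube-system"},
]
ignore_entry = {"name": "db-creds", "namespace": "default"}
print(usefulFunctNoGoodName(resources, ignore_entry))
# [{'kind': 'Secret', 'name': 'tls-cert', 'namespace': 'kube-system'}]
# only the resource matched by *every* key in the ignore entry is removed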


trim.py

Source: trim.py (GitHub)


from ...imports import *

def trim_times(self, just_edges=True, when_to_give_up=1, minimum_acceptable_ok=1):
    """
    Trim times that are all (or mostly) not numbers.

    Parameters
    ----------
    just_edges : bool
        Should we only trim the outermost bad time bins?
        `True` = Just trim off the bad edges and keep
        interior bad values. Keeping interior data, even if
        they're all bad, often helps to make for more
        intuitive imshow plots.
        `False` = Trim off every bad time, whether it's on
        the edge or somewhere in the middle of the dataset.
        The resulting Rainbow will be smaller, but it might
        be a little tricky to visualize with imshow.
    when_to_give_up : float
        The fraction of wavelengths that must be nan or not OK
        for the entire time to be considered bad (default = 1).
        `1.0` = trim only if all wavelengths are bad
        `0.5` = trim if more than 50% of wavelengths are bad
        `0.0` = trim if any wavelengths are bad
    minimum_acceptable_ok : float
        The numbers in the `.ok` attribute express "how OK?" each
        data point is, ranging from 0 (not OK) to 1 (super OK).
        In most cases, `.ok` will be binary, but there may be times
        where it's intermediate (for example, if a bin was created
        from some data that were not OK and some that were).
        The `minimum_acceptable_ok` parameter allows you to specify what
        level of OK-ness for a point to not get trimmed.
    """
    # create a history entry for this action (before other variables are defined)
    h = self._create_history_entry("trim_times", locals())
    # figure out which times should be considered bad
    is_nan = np.isnan(self.flux)
    isnt_ok = self.ok < minimum_acceptable_ok
    fraction_bad = np.sum(is_nan | isnt_ok, axis=self.waveaxis) / self.nwave
    should_be_kept = fraction_bad < when_to_give_up
    # only make cuts on the edges (if desired)
    if just_edges:
        isnt_before_first = np.cumsum(should_be_kept) > 0
        isnt_after_last = (np.cumsum(should_be_kept[::-1]) > 0)[::-1]
        isnt_edge = isnt_before_first & isnt_after_last
        should_be_kept = should_be_kept | isnt_edge
    # actually trim the Rainbow
    new = self[:, should_be_kept]
    new._remove_last_history_entry()
    # append the history entry to the new Rainbow
    new._record_history_entry(h)
    # return the new Rainbow
    return new

def trim_wavelengths(self, just_edges=True, when_to_give_up=1, minimum_acceptable_ok=1):
    """
    Trim wavelengths that are all (or mostly) not numbers.

    Parameters
    ----------
    just_edges : bool
        Should we only trim the outermost bad wavelength bins?
        `True` = Just trim off the bad edges and keep
        interior bad values. Keeping interior data, even if
        they're all bad, often helps to make for more
        intuitive imshow plots.
        `False` = Trim off every bad wavelength, whether it's on
        the edge or somewhere in the middle of the dataset.
        The resulting Rainbow will be smaller, but it might
        be a little tricky to visualize with imshow.
    when_to_give_up : float
        The fraction of times that must be nan or not OK
        for the entire wavelength to be considered bad (default = 1).
        `1.0` = trim only if all times are bad
        `0.5` = trim if more than 50% of times are bad
        `0.0` = trim if any times are bad
    minimum_acceptable_ok : float
        The numbers in the `.ok` attribute express "how OK?" each
        data point is, ranging from 0 (not OK) to 1 (super OK).
        In most cases, `.ok` will be binary, but there may be times
        where it's intermediate (for example, if a bin was created
        from some data that were not OK and some that were).
        The `minimum_acceptable_ok` parameter allows you to specify what
        level of OK-ness for a point to not get trimmed.
    """
    # create a history entry for this action (before other variables are defined)
    h = self._create_history_entry("trim_wavelengths", locals())
    # figure out which wavelengths should be considered bad
    is_nan = np.isnan(self.flux)
    isnt_ok = self.ok < minimum_acceptable_ok
    fraction_bad = np.sum(is_nan | isnt_ok, axis=self.timeaxis) / self.ntime
    should_be_kept = fraction_bad < when_to_give_up
    # only make cuts on the edges (if desired)
    if just_edges:
        isnt_before_first = np.cumsum(should_be_kept) > 0
        isnt_after_last = (np.cumsum(should_be_kept[::-1]) > 0)[::-1]
        isnt_edge = isnt_before_first & isnt_after_last
        should_be_kept = should_be_kept | isnt_edge
    # actually trim the Rainbow
    new = self[should_be_kept, :]
    new._remove_last_history_entry()
    # append the history entry to the new Rainbow
    new._record_history_entry(h)
    # return the new Rainbow
    return new

def trim(self, just_edges=True, when_to_give_up=1, minimum_acceptable_ok=1):
    """
    just_edges : bool
        Should we only trim the outermost bad wavelength bins?
        `True` = Just trim off the bad edges and keep
        interior bad values. Keeping interior data, even if
        they're all bad, often helps to make for more
        intuitive imshow plots.
        `False` = Trim off every bad wavelength, whether it's on
        the edge or somewhere in the middle of the dataset.
        The resulting Rainbow will be smaller, but it might
        be a little tricky to visualize with imshow.
    when_to_give_up : float
        The fraction of times that must be nan or not OK
        for the entire wavelength to be considered bad (default = 1).
        `1.0` = trim only if all times are bad
        `0.5` = trim if more than 50% of times are bad
        `0.0` = trim if any times are bad
    minimum_acceptable_ok : float
        The numbers in the `.ok` attribute express "how OK?" each
        data point is, ranging from 0 (not OK) to 1 (super OK).
        In most cases, `.ok` will be binary, but there may be times
        where it's intermediate (for example, if a bin was created
        from some data that were not OK and some that were).
        The `minimum_acceptable_ok` parameter allows you to specify what
        level of OK-ness for a point to not get trimmed.
    """
    trimmed = self.trim_times(
        when_to_give_up=when_to_give_up,
        just_edges=just_edges,
        minimum_acceptable_ok=minimum_acceptable_ok,
    )
    trimmed = trimmed.trim_wavelengths(
        when_to_give_up=when_to_give_up,
        just_edges=just_edges,
        minimum_acceptable_ok=minimum_acceptable_ok,
    )
...
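In trim.py (from the chromatic Rainbow package), should_be_kept is a boolean NumPy mask rather than a single flag, and the cumulative-sum trick re-includes interior bad bins so that only leading and trailing bad bins get cut when just_edges=True. The following self-contained sketch reproduces just that masking logic on a toy fraction_bad array; the array values are made up for illustration.

import numpy as np

fraction_bad = np.array([1.0, 1.0, 0.0, 1.0, 0.2, 0.0, 1.0])
when_to_give_up = 1.0
should_be_kept = fraction_bad < when_to_give_up  # [F, F, T, F, T, T, F]

# re-include interior bad bins, mirroring trim_times/trim_wavelengths
isnt_before_first = np.cumsum(should_be_kept) > 0
isnt_after_last = (np.cumsum(should_be_kept[::-1]) > 0)[::-1]
should_be_kept = should_be_kept | (isnt_before_first & isnt_after_last)
print(should_be_kept)  # [False False  True  True  True  True False]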


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
