How to use the diff_configs method in autotest

Best Python code snippet using autotest_python

mdpp_to_cave.py

Source: mdpp_to_cave.py (GitHub)


import numpy as np
import pandas as pd
import argparse, os
import json
from mdp_playground.analysis import MDPP_Analysis
from cave.cavefacade import CAVE


class MDPPToCave:
    def __init__(self):
        return

    def _create_configspace_json(self, stats_pd, var_configs):
        configspace = {
            "hyperparameters": [],
            "conditions": [],
            "forbiddens": [],
            "python_module_version": "0.4.11",
            "json_format_version": 0.1,
        }
        for param in var_configs:
            param_config = {"name": param}
            var_type = str(type(stats_pd[param].iloc[0]))
            if "int" in var_type or "bool" in var_type:
                param_config["lower"] = int(stats_pd[param].min())
                param_config["upper"] = int(stats_pd[param].max())
                param_config["default"] = int(
                    param_config["lower"] + param_config["upper"] // 2
                )
                param_config["type"] = "uniform_int"
            elif "str" in var_type:
                # Categorical
                param_config["type"] = "categorical"
                param_config["choices"] = list(stats_pd["conv_filters"].unique())
                param_config["default"] = param_config["choices"][0]
            else:  # Float
                param_config["lower"] = float(stats_pd[param].min())
                param_config["upper"] = float(stats_pd[param].max())
                param_config["default"] = (
                    param_config["lower"] + param_config["upper"]
                ) / 2
                param_config["type"] = "uniform_float"
            if "lr" in param:
                param_config["log"] = True
            else:
                param_config["log"] = False
            configspace["hyperparameters"].append(param_config)
        return configspace

    def _create_run_history(
        self, stats_pd, seed_idx, col_names, output_folder, var_configs
    ):
        final_rows_for_a_config = []
        for i in range(stats_pd.shape[0] - 1):
            if (
                stats_pd["timesteps_total"].iloc[i]
                > stats_pd["timesteps_total"].iloc[i + 1]
            ):
                final_rows_for_a_config.append(i)
        final_rows_for_a_config.append(
            stats_pd.shape[0] - 1
        )  # Always append the last row!
        runhistory_col_names = ["cost", "time", "status", "budget", "seed"]
        runhistory_df = pd.DataFrame(columns=runhistory_col_names)
        runhistory_df["cost"] = (
            -1 * stats_pd["episode_reward_mean"].iloc[final_rows_for_a_config]
        )
        runhistory_df["time"] = stats_pd["episode_len_mean"].iloc[
            final_rows_for_a_config
        ]
        runhistory_df["budget"] = stats_pd["timesteps_total"].iloc[
            final_rows_for_a_config
        ]
        if seed_idx > 0:
            runhistory_df["seed"] = stats_pd[col_names[seed_idx]].iloc[
                final_rows_for_a_config
            ]
        else:
            runhistory_df["seed"] = 0
        runhistory_df["status"] = "SUCCESS"
        for var in var_configs:
            runhistory_df[var] = stats_pd[var].iloc[final_rows_for_a_config]
        return runhistory_df

    def join_files(self, file_prefix, file_suffix):
        """Utility to join files that were written with different experiment configs"""
        with open(file_prefix + file_suffix, "ab") as combined_file:
            i = 0
            num_diff_lines = []
            while True:
                if os.path.isfile(file_prefix + "_" + str(i) + file_suffix):
                    with open(
                        file_prefix + "_" + str(i) + file_suffix, "rb"
                    ) as curr_file:
                        byte_string = curr_file.read()
                        newline_count = byte_string.count(10)
                        num_diff_lines.append(newline_count)
                        combined_file.write(byte_string)
                else:
                    break
                i += 1
            print(
                str(i)
                + " files were combined into 1 for file:"
                + file_prefix
                + file_suffix
            )
            # print("Files missing for config_nums:", missing_configs, ". Did you pass the right value for max_total_configs as an argument?")
            # print("Unique line count values:", np.unique(num_diff_lines))
            if i == 0:
                raise FileNotFoundError(
                    "No files to combine were present. Please check your location and/or filenames that they are correct."
                )

    def _read_stats(self, stats_file):
        if os.path.isfile(stats_file + ".csv"):
            print(
                "\033[1;31mLoading data from a sequential run/already combined runs of experiment configurations.\033[0;0m"
            )
        else:
            print(
                "\033[1;31mLoading data from a distributed run of experiment configurations. Creating a combined CSV stats file.\033[0;0m"
            )
            self.join_files(stats_file, ".csv")
            self.join_files(stats_file, "_eval.csv")

    def to_cave_csv(self, args):
        # file_path = args.file_path
        input_folder = "../mdp_files/"
        file_name = "dqn_vanilla_learning_starts"
        output_folder = "../to_cave_format/%s" % file_name
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        ## Read current csvs ##
        stats_file = os.path.join(input_folder, file_name)
        with open(stats_file + ".csv") as file_:
            col_names = file_.readline().strip().split(", ")
            col_names[0] = col_names[0][2:]  # to remove '# ' that was written
        # print("config_names:", col_names)
        stats_pd = pd.read_csv(
            stats_file + ".csv",
            skip_blank_lines=True,
            header=None,
            names=col_names,
            comment="#",
            sep=" ",
        )
        remove_names = ["training_iteration", "algorithm", "seed"]
        parameters = col_names[:-3].copy()  # All parameters tracked in run
        for x in col_names:
            for name in remove_names:
                if name in x:
                    parameters.remove(x)
        # Compute parameters that varied and store value in dict
        config_values = {}
        seed_idx = -1
        for i, c in enumerate(col_names):  # hardcoded 3 for no. of stats written
            if c in parameters:  # parameters we care about
                config_values[c] = stats_pd[c].unique()  # values a given parameter took
                if "seed" in c:
                    seed_idx = i
        var_configs = [p for p in parameters if len(config_values[p]) > 1]
        configspace = self._create_configspace_json(stats_pd, var_configs)
        output_configspace = os.path.join(output_folder, "configspace.json")
        with open(output_configspace, "w") as fp:
            json.dump(configspace, fp, indent=2)
        scenario_str = "paramfile = ./configspace.json\nrun_obj = quality"
        output_configspace = os.path.join(output_folder, "scenario.txt")
        with open(output_configspace, "w") as fp:
            fp.write(scenario_str)
        # Runhistory and trajectory files
        runhistory_df = self._create_run_history(
            stats_pd, seed_idx, col_names, output_folder, var_configs
        )
        runhistory_df.to_csv(
            os.path.join(output_folder, "runhistory.csv"), header=True, index=False
        )

    def to_bohb_results(
        self, input_dir, exp_name, output_dir="../cave_output/", overwrite=False
    ):
        """Converts MDP Playground stats CSVs to BOHB format stats files:
        configs.json, results.json, configspace.json, in output_dir/exp_name.
        This file can be fed into cave for further analysis.
        Currently only compatible with the MDPP expt. of type: grid of configs
        exp_name : str
            Should be the expt name from MDPP, i.e., the "prefix" of the CSV stats files.
            A sub-directory of output_dir is created with this name to store BOHB format stats files.
        overwrite : bool
            If existing files should be overwritten.
        Returns "<output_dir>/<exp_name>"
        """
        print("Writing BOHB to cave output to %s" % (os.path.abspath(output_dir)))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # file_path = args.file_path
        output_dir_final = os.path.join(output_dir, exp_name)
        if not os.path.exists(output_dir_final):
            os.makedirs(output_dir_final)
        # Read current CSVs
        ##TODO Re-use code from analysis.py to load data instead of processing it again here:
        # mdpp_analysis = MDPP_Analysis()
        # self.exp_data = mdpp_analysis.get_exp_data(dir_name=input_dir,
        #                                            exp_name=exp_name,
        #                                            )
        # print("exp_data:\n", self.exp_data)
        # exp_data["dims_varied"]
        # exp_data["dims_values"]
        stats_file = os.path.join(input_dir, exp_name)
        stats_file = os.path.abspath(stats_file)
        self._read_stats(stats_file)
        with open(stats_file + ".csv") as file_:
            col_names = file_.readline().strip().split(", ")
            col_names[0] = col_names[0][2:]  # to remove '# ' that was written
        # print("config_names:", col_names)
        stats_pd = pd.read_csv(
            stats_file + ".csv",
            skip_blank_lines=True,
            header=None,
            names=col_names,
            comment="#",
            sep=" ",
        )
        remove_names = ["training_iteration", "algorithm", "seed"]
        parameters = col_names[:-3].copy()  # All parameters tracked in run
        for x in col_names:
            for name in remove_names:
                if name in x:
                    parameters.remove(x)
        # Compute parameters that varied and store value in dict
        config_values = {}
        seed_idx = -1
        for i, c in enumerate(col_names):  # hardcoded 3 for no. of stats written
            if c in parameters:  # parameters we care about
                config_values[c] = stats_pd[c].unique()  # values a given parameter took
                if "seed" in c:
                    seed_idx = i
        var_configs = [p for p in parameters if len(config_values[p]) > 1]
        final_rows_for_a_config = []
        for i in range(stats_pd.shape[0] - 1):
            if (
                stats_pd["timesteps_total"].iloc[i]
                > stats_pd["timesteps_total"].iloc[i + 1]
            ):
                final_rows_for_a_config.append(i)
        final_rows_for_a_config.append(
            stats_pd.shape[0] - 1
        )  # Always append the last row!
        ##------------- Start converting csv ----------------##
        # configspace and scenario file
        configspace = self._create_configspace_json(stats_pd, var_configs)
        cs_json_file = os.path.join(output_dir_final, "configspace.json")
        if os.path.exists(cs_json_file):
            if not overwrite:
                raise FileExistsError()
        with open(cs_json_file, "w") as fp:
            json.dump(configspace, fp, indent=2)
        # print("var_configs:", var_configs)
        # Trajectory and runhistory files
        # Finding end configuration training
        diff_configs = stats_pd.iloc[final_rows_for_a_config]
        # print("diff_configs:", diff_configs)
        diff_configs = diff_configs.groupby(var_configs)
        # print("grouped by", diff_configs)
        configs_mean = diff_configs.mean()
        # print("mean:", configs_mean)
        # print("diff_configs.groups:", diff_configs.groups)
        diff_configs_results = []  # results.json
        diff_configs_lst = []
        budget = stats_pd["timesteps_total"].iloc[
            final_rows_for_a_config[0]
        ]  # all have the same budget
        aux = 0
        for i, group_name in enumerate(diff_configs.groups):
            group_labels = diff_configs.groups[group_name]
            config_id = [0, 0, i]
            config_dict = {}
            # configs.json
            config_lst = [config_id]
            for name in var_configs:
                value = stats_pd[name].iloc[group_labels[0]]
                if isinstance(value, str):
                    config_dict[name] = value
                else:
                    config_dict[name] = value.item()
            config_lst.append(config_dict)
            config_lst.append({"model_based_pick": False})
            diff_configs_lst.append(config_lst)
            # results.json
            mean_reward = configs_mean["episode_reward_mean"].iloc[i]  # mean along seed
            results_lst = [
                config_id,
                budget.item(),
                {
                    "submitted": float("%.2f" % aux),
                    "started": float("%.2f" % (aux + 0.1)),
                    "finished": float("%.2f" % (aux + 1)),
                },
            ]
            aux += 1.1
            results_dict = {"loss": -mean_reward.item(), "info": {}}
            results_lst.append(results_dict)
            results_lst.append(None)
            diff_configs_results.append(results_lst)
        # configs.json
        output_configs = os.path.join(output_dir_final, "configs.json")
        if os.path.exists(output_configs):
            if not overwrite:
                raise FileExistsError()
        with open(output_configs, "w") as fout:
            for d in diff_configs_lst:
                json.dump(d, fout)
                fout.write("\n")
        # results.json
        output_results = os.path.join(output_dir_final, "results.json")
        if os.path.exists(output_results):
            if not overwrite:
                raise FileExistsError()
        with open(output_results, "w") as fout:
            for d in diff_configs_results:
                json.dump(d, fout)
                fout.write("\n")
        return output_dir_final

    def to_CAVE_object(
        self, input_dir, exp_name, output_dir="../cave_output/", overwrite=False
    ):
        """Converts MDP Playground stats CSVs to BOHB format stats files and creates
        a CAVE object from them.
        Please see to_bohb_results() for details about some of the parameters.
        """
        cave_input_file = self.to_bohb_results(
            input_dir, exp_name, output_dir, overwrite=overwrite
        )
        cave_results = os.path.join(cave_input_file, "out")
        cave = CAVE(
            folders=[cave_input_file],
            output_dir=cave_results,
            ta_exec_dir=[cave_input_file],
            file_format="BOHB",
            show_jupyter=True,
        )
        return cave


if __name__ == "__main__":
    input_dir = "../mdp_files/"
    exp_name = "dqn_seq_del"
    from cave.cavefacade import CAVE
    import os

    # The converted mdpp csvs will be stored in output_dir
    output_dir = "../mdpp_to_cave"
    mdpp_file = os.path.join(input_dir, exp_name)
    mdpp_cave = MDPPToCave()
    cave_input_file = mdpp_cave.to_bohb_results(input_dir, exp_name, output_dir)
    # cave_input_file = "../../../mdpp_to_cave/dqn_seq_del"
    # Similarly, as an example, cave will output its results
    # to the same directory as cave's input files
    cave_results = os.path.join(cave_input_file, "output")
    print(os.path.abspath(cave_results))
    cave = CAVE(
        folders=[cave_input_file],
        output_dir=cave_results,
        ta_exec_dir=[cave_input_file],
        file_format="BOHB",
        show_jupyter=True,
    )
    # Common analysis
    cave.performance_table()
    cave.local_parameter_importance()
    cave.cave_fanova()  # can only be used with more than 1 dimension of hardness
    # Other analysis
    # cave.parallel_coordinates()
    # cave.cost_over_time()
    # cave.algorithm_footprints()
    # cave.pimp_comparison_table()
    # cave.cave_ablation()
    # cave.pimp_forward_selection()
    # cave.feature_importance()
    # cave.configurator_footprint()
    # cave.algorithm_footprints()
    # cave.plot_ecdf()
    # cave.plot_scatter()
    # cave.compare_default_incumbent()...
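
Note that in to_bohb_results() above, diff_configs is not a function but a pandas DataFrame (and then a GroupBy): it holds the final training row of every run, grouped by the config columns that actually varied so results can be averaged across seeds. Below is a minimal sketch of that pattern on made-up data; the column names mirror the MDPP stats files, but the values and the lr column are invented for illustration.

import pandas as pd

# Made-up stand-in for the MDPP stats CSV: two configs (lr values), two seeds each.
# In the real data only the last row of each run matters (largest timesteps_total);
# here every row is already a final row, so the indices are listed directly.
stats_pd = pd.DataFrame(
    {
        "lr": [1e-3, 1e-3, 1e-4, 1e-4],
        "seed": [0, 1, 0, 1],
        "timesteps_total": [20000, 20000, 20000, 20000],
        "episode_reward_mean": [150.0, 170.0, 90.0, 110.0],
    }
)
var_configs = ["lr"]                    # config columns whose values actually varied
final_rows_for_a_config = [0, 1, 2, 3]  # last row of each run (all rows in this toy case)

diff_configs = stats_pd.iloc[final_rows_for_a_config]  # final row per run
diff_configs = diff_configs.groupby(var_configs)       # one group per distinct config
configs_mean = diff_configs.mean()                     # averages the seeds within a group

print(configs_mean["episode_reward_mean"])
# lr=0.0001 -> 100.0, lr=0.001 -> 160.0 (reward averaged across the two seeds)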


kconfig-bisect

Source: kconfig-bisect (GitHub)


#!/usr/bin/env python3
#
# Use binary search to find the config change that introduced a bug
#
import argparse
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
from difflib import Differ
from prettytable import PrettyTable


def read_config_file(config_file):
    '''
    Read a config file into an ordered dict
    '''
    configs = OrderedDict()
    with open(config_file, encoding='utf-8') as fh:
        for line in fh:
            line = line.strip()
            if line.startswith('CONFIG_'):
                cfg, val = line.split('=', 1)
            elif line.startswith('# CONFIG_'):
                cfg = line.split(' ')[1]
                # val = '___UNSET___'
                val = None
            elif line.startswith('# Linux/'):
                arch = line.split(' ')[1].split('/')[1]
                continue
            else:
                continue
            configs[cfg] = val
    return arch, configs


def write_config_file(configs):
    '''
    Write an ordered dict to a config file
    '''
    with open('.config', 'w', encoding='utf-8') as fh:
        for cfg, val in configs.items():
            if val is None:
                fh.write(f'# {cfg} is not set\n')
            else:
                fh.write(f'{cfg}={val}\n')


def merge_configs(good_configs, bad_configs):
    '''
    Merge two ordered dicts
    '''
    differ = Differ()
    result = differ.compare(list(good_configs.keys()),
                            list(bad_configs.keys()))
    all_configs = []
    for r in list(result):
        if r[0:2] in ('- ', '+ ', '  '):
            all_configs.append(r[2:])
    merged_configs = OrderedDict()
    for cfg in all_configs:
        if cfg in merged_configs:
            print(f'-- Internal error: Duplicate config in ordered dict: {cfg}')
            sys.exit(1)
        merged_configs[cfg] = {
            'good': good_configs.get(cfg),
            'bad': bad_configs.get(cfg),
        }
    return merged_configs


def generate_config_file(arch, configs, merged_configs, prev_new_configs=None):
    '''
    Generate a new config file
    '''
    if prev_new_configs is None:
        prev_new_configs = []
    # Write the config file and determine all new config options
    write_config_file(configs)
    p = subprocess.run(['make', f'ARCH={arch}', 'listnewconfig'],
                       stdout=subprocess.PIPE, text=True, check=True)
    new_configs = [p.strip() for p in p.stdout.split('\n') if p.strip()]
    if not new_configs or new_configs == prev_new_configs:
        # All done, i.e., no new configs or new configs didn't change from the
        # previous run.
        subprocess.run(['make', f'ARCH={arch}', 'olddefconfig'], check=True)
        return
    # For the new config options, use the values from the 'good' config if they
    # exist, or the defaults otherwise
    for new in new_configs:
        cfg, val = new.split('=', 1)
        if cfg in merged_configs:
            val = merged_configs[cfg]['good']
        print(f'-- New config: {cfg} -> {str(val)}')
        configs[cfg] = val
    # Rerun
    generate_config_file(arch, configs, merged_configs, new_configs)


def main():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands', required=True)
    sparser = subparsers.add_parser('start',
                                    help='Start a new bisect run')
    sparser.add_argument('good_config', metavar='good.config',
                         help='Good config file')
    sparser.add_argument('bad_config', metavar='bad.config',
                         help='Bad config file')
    sparser.set_defaults(command='start')
    sparser = subparsers.add_parser('good', help='Good test result')
    sparser.set_defaults(command='good')
    sparser = subparsers.add_parser('bad', help='Bad test result')
    sparser.set_defaults(command='bad')
    args = parser.parse_args()
    kb_dir = '.kconfig-bisect'
    good_config = os.path.join(kb_dir, 'good')
    bad_config = os.path.join(kb_dir, 'bad')
    if args.command == 'start':
        if not os.path.exists(kb_dir):
            os.mkdir(kb_dir)
        shutil.copy(args.bad_config, bad_config)
        shutil.copy(args.good_config, good_config)
    elif args.command == 'good':
        shutil.copy('.config', good_config)
    elif args.command == 'bad':
        shutil.copy('.config', bad_config)
    else:
        print(f'-- Internal error: Unsupported command: {args.command}')
        sys.exit(1)
    # Read the config files
    arch, good_configs = read_config_file(good_config)
    _arch, bad_configs = read_config_file(bad_config)
    if arch != _arch:
        print(f'-- Config architectures don\'t match ({arch} != {_arch})')
        return 1
    print(f'-- Config arch: {arch}')
    # Merge the config files
    merged_configs = merge_configs(good_configs, bad_configs)
    # List of different configs
    diff_configs = []
    for cfg, val in merged_configs.items():
        if val['good'] != val['bad']:
            diff_configs.append(cfg)
    if not diff_configs:
        print('-- No differences found')
        return 0
    # Print the different config settings
    print(f'-- Diff configs: {len(diff_configs)}')
    table = PrettyTable()
    table.field_names = (['Config', 'Good', 'Bad'])
    for cfg in diff_configs:
        table.add_row([cfg, merged_configs[cfg]['good'],
                       merged_configs[cfg]['bad']])
    table.align = 'l'
    print(table)
    # Create a new config
    new_configs = OrderedDict(bad_configs)
    for cfg in diff_configs[0:int((len(diff_configs) / 2))]:
        new_configs[cfg] = good_configs.get(cfg)
    generate_config_file(arch, new_configs, merged_configs)
    return 0


if __name__ == '__main__':
    ...
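
The bisection step above is easier to see on toy data: pair the good and bad values per option, keep the options whose values differ (diff_configs), and revert the first half of them to their good values for the next trial. A minimal sketch with invented config options follows; no kernel tree or make invocation is involved.

from collections import OrderedDict

# Invented good/bad option values standing in for two real kernel .config files.
good = OrderedDict([("CONFIG_A", "y"), ("CONFIG_B", None), ("CONFIG_C", "m")])
bad = OrderedDict([("CONFIG_A", "y"), ("CONFIG_B", "y"), ("CONFIG_C", None)])

# Pair up the per-option values, as merge_configs() does (None means "not set").
merged = OrderedDict(
    (cfg, {"good": good.get(cfg), "bad": bad.get(cfg)})
    for cfg in list(good) + [c for c in bad if c not in good]
)

# Options whose good/bad values differ are the bisection candidates.
diff_configs = [cfg for cfg, val in merged.items() if val["good"] != val["bad"]]
print(diff_configs)  # ['CONFIG_B', 'CONFIG_C']

# Next trial config: start from the bad config and revert the first half of the
# differing options to their good values, as main() does before regenerating
# the .config with 'make olddefconfig'.
trial = OrderedDict(bad)
for cfg in diff_configs[0 : len(diff_configs) // 2]:
    trial[cfg] = good.get(cfg)
print(trial)  # CONFIG_B is back to its good (unset) value; CONFIG_C still differs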


kernel_config.py

Source: kernel_config.py (GitHub)


...
    else:
        output.write(line)
    input.close()
    output.close()


def diff_configs(old, new):
    utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
                 ignore_status=True)


def modules_needed(config):
    return (utils.grep('CONFIG_MODULES=y', config) and utils.grep('=m', config))


def config_by_name(name, set):
    version = kernel_versions.version_choose_config(name, set[1:])
    if version:
        return set[0] + version
    return None


class kernel_config(object):
    # Build directory must be ready before init'ing config.
    #
    # Stages:
    #   1. Get original config file
    #   2. Apply overrides
    #   3. Do 'make oldconfig' to update it to current source code
    #      (gets done implicitly during the process)
    #
    # You may specify a defconfig within the tree to build,
    # or a custom config file you want, or None, to get the machine's
    # default config file from the repo.

    build_dir = ''       # the directory we're building in
    config_dir = ''      # local repository for config_file data
    build_config = ''    # the config file in the build directory
    orig_config = ''     # the original config file
    over_config = ''     # config file + overrides

    def __init__(self, job, build_dir, config_dir, orig_file,
                 overrides, defconfig = False, name = None, make = None):
        self.build_dir = build_dir
        self.config_dir = config_dir
        # 1. Get original config file
        self.build_config = build_dir + '/.config'
        if (orig_file == '' and not defconfig and not make):  # use user default
            set = job.config_get("kernel.default_config_set")
            defconf = None
            if set and name:
                defconf = config_by_name(name, set)
            if not defconf:
                defconf = job.config_get("kernel.default_config")
            if defconf:
                orig_file = defconf
        if (orig_file == '' and not make and defconfig):      # use defconfig
            make = 'defconfig'
        if (orig_file == '' and make):                        # use the config command
            print "kernel_config: using " + make + " to configure kernel"
            os.chdir(build_dir)
            make_return = utils.system('make %s > /dev/null' % make)
            self.config_record(make)
            if (make_return):
                raise error.TestError('make %s failed' % make)
        else:
            print "kernel_config: using " + orig_file + \
                            " to configure kernel"
            self.orig_config = config_dir + '/config.orig'
            utils.get_file(orig_file, self.orig_config)
            self.update_config(self.orig_config, self.orig_config+'.new')
            diff_configs(self.orig_config, self.orig_config+'.new')
        # 2. Apply overrides
        if overrides:
            print "kernel_config: using " + overrides + \
                            " to re-configure kernel"
            self.over_config = config_dir + '/config.over'
            overrides_local = self.over_config + '.changes'
            utils.get_file(overrides, overrides_local)
            apply_overrides(self.build_config, overrides_local, self.over_config)
            self.update_config(self.over_config, self.over_config+'.new')
            diff_configs(self.over_config, self.over_config+'.new')
        else:
            self.over_config = self.orig_config

    def update_config(self, old_config, new_config = 'None'):
        os.chdir(self.build_dir)
        shutil.copyfile(old_config, self.build_config)
        utils.system('yes "" | make oldconfig > /dev/null')
        if new_config:
            shutil.copyfile(self.build_config, new_config)

    def config_record(self, name):
        # Copy the current .config file to the config.<name>[.<n>]
        i = 1
        to = self.config_dir + '/config.%s' % name
        while os.path.exists(to):
            i += 1
            ...
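
In autotest's kernel_config module above, diff_configs(old, new) simply shells out to diff -u and writes the result next to the new file as <new>.diff; kernel_config.__init__() calls it after 'make oldconfig' to record what changed. A minimal sketch of calling it standalone is shown below; the import path is an assumption that varies between autotest versions, and the file paths are placeholders.

# Hypothetical standalone call; the import path is an assumption and may differ
# across autotest versions/checkouts.
from autotest.client.kernel_config import diff_configs

old_config = '/tmp/config.orig'        # placeholder: original kernel .config
new_config = '/tmp/config.orig.new'    # placeholder: .config after 'make oldconfig'

# Runs 'diff -u old new > new.diff' via utils.system() with ignore_status=True,
# so a non-zero diff exit code (meaning the files differ) is not treated as an error.
diff_configs(old_config, new_config)

with open(new_config + '.diff') as fh:
    print(fh.read())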


