How to use the exceptions_csv method in Locust

Best Python code snippet using Locust

tkl_coverage_analysis.py

Source:tkl_coverage_analysis.py Github

copy

Full Screen

"""Coverage-analysis utilities for TackleTest (tkltest) experiments.

Parses per-app JaCoCo coverage reports, CTD test-plan coverage summaries and
exception summaries produced by experiment runs, aggregates them into pandas
DataFrames, writes result CSVs, and plots graphs with plotly.

NOTE(review): this module was reconstructed from a line-number-garbled copy;
behavior was preserved except for the explicitly marked bug fixes.
"""
import errno
import json
import os
import re
import sys

import numpy as np
import pandas as pd
import plotly.express as px
import toml
from pandas.core.dtypes.common import is_numeric_dtype
from pathlib import Path

import apps_list

pd.options.mode.chained_assignment = None  # default='warn'

# Column layout shared by all coverage-result CSV files written/read here.
COLUMNS = ['App', 'Time limit', 'Interaction Level', 'Combination', 'Trial', 'Averaging criterion',
           'Instruction coverage', 'Branch coverage', 'Method coverage', 'Test methods', 'CTD coverage']

# Matches a JUnit @Test annotation, optionally carrying a numeric argument.
# NOTE(review): the attribute name 'time_limit' is kept from the original —
# confirm against the generated test sources (JUnit itself uses 'timeout').
_TEST_ANNOTATION_RE = re.compile(r'[\t ]*@Test(?:\(time_limit ?= ?[0-9]+\))?[\t ]*')
# EvoSuite emits a placeholder test class containing this marker when it
# produces no tests; the placeholder @Test must not be counted.
_EVO_NONE_RE = re.compile(r'EvoSuite did not generate any tests')


def calc_coverage(coverage_file, precision):
    """Return (instruction, branch, method) coverage ratios from a JaCoCo CSV.

    Sums the COVERED/MISSED counters over all rows of the report and rounds
    each ratio to ``precision`` decimal places.
    """
    all_data = pd.read_csv(coverage_file)
    cov_sums = all_data.sum(axis=0)
    inst_cov = round(cov_sums['INSTRUCTION_COVERED'] /
                     (cov_sums['INSTRUCTION_COVERED'] + cov_sums['INSTRUCTION_MISSED']), precision)
    branch_cov = round(cov_sums['BRANCH_COVERED'] /
                       (cov_sums['BRANCH_COVERED'] + cov_sums['BRANCH_MISSED']), precision)
    method_cov = round(cov_sums['METHOD_COVERED'] /
                       (cov_sums['METHOD_COVERED'] + cov_sums['METHOD_MISSED']), precision)
    return inst_cov, branch_cov, method_cov


def count_test_methods(test_file_list):
    """Count @Test-annotated methods across the given Java test files.

    A file containing the EvoSuite "did not generate any tests" marker holds
    one placeholder test, which is subtracted again.
    """
    # Regexes are hoisted to module level (_TEST_ANNOTATION_RE, _EVO_NONE_RE)
    # instead of being recompiled for every file.
    num_methods = 0
    for test_file in test_file_list:
        with open(test_file, 'r') as f:
            for line in f:
                if _TEST_ANNOTATION_RE.match(line):
                    num_methods += 1
                if _EVO_NONE_RE.search(line):
                    num_methods -= 1
    return num_methods


def snake_case(s, pre='', post=''):
    """Lower-case ``s`` (with optional pre/post words), turn separators into
    underscores and drop parentheses — used to build image/CSV file names."""
    if pre:
        pre += ' '
    if post:
        post = ' ' + post
    return re.sub(r'[()]', '', re.sub(r'[ ,.-]', '_', pre + s.lower() + post))


def open_csv(csv_file, columns, categorized=True):
    """Load a result CSV previously written by this module.

    The leading unnamed index column is dropped.  When ``categorized``, any
    columns beyond ``columns`` are treated as category names and the function
    returns ``(df, categories)``; otherwise it returns just the DataFrame.
    """
    with open(csv_file) as f:
        df = pd.read_csv(f)
    if categorized:
        categories = list(df.columns[1 + len(columns):])
        df.columns = ['0'] + columns + categories
        return df.drop('0', axis=1), categories
    df.columns = ['0'] + columns
    return df.drop('0', axis=1)


def print_app_df(apps, avg_df, name_df, images_folder, categories, rng_y):
    """Write one line plot per app and per coverage column under
    ``images_folder/name_df/per-app``.  Skips apps/columns with no data."""
    current_images_folder_base = os.path.join(images_folder, name_df)
    desired_columns = ['Instruction coverage', 'Branch coverage', 'Method coverage'] + categories
    # create scattered plot per app, averaging trials and sampling trials alike
    current_images_folder = os.path.join(current_images_folder_base, 'per-app')
    Path(current_images_folder).mkdir(parents=True, exist_ok=True)
    for app in apps:
        print('Plotting coverage graphs for app ' + app)
        app_df = avg_df[(avg_df['App'] == app)]
        if app_df.empty:
            continue
        for column in desired_columns:
            # Drop rows whose value is NaN (stringified), then cast to float.
            app_df_filtered = app_df[(app_df[column].astype(str) != 'nan')].astype({column: float})
            if not app_df_filtered.empty:
                app_df_filtered.sort_values(by=['Interaction Level', 'Combination'], axis=0, inplace=True)
                fig = px.line(app_df_filtered, x='Interaction Level', y=column, color='Configuration',
                              range_y=rng_y)
                fig.write_image(os.path.join(current_images_folder, f'{app}_{snake_case(column)}.png'), format='png')
                print('Written image ' + os.path.join(current_images_folder, f'{app}_{snake_case(column)}.png'))


def print_all_df(all_df, name_df, images_folder, categories, config):
    """Write one box plot per coverage column over ALL apps (no averaging)
    under ``images_folder/name_df/all-apps``, plus a CSV of the plotted data."""
    width, rng_y, pts = config['analyze']['width'], config['analyze']['rng_y'], config['analyze']['pts']
    # Rename the configuration for presentation purposes.
    all_df.replace('ctdamplified', 'ctdguided', regex=True, inplace=True)
    current_images_folder_base = os.path.join(images_folder, name_df)
    desired_columns = ['Instruction coverage', 'Branch coverage', 'Method coverage'] + categories
    # create box plot for all data, no averaging
    current_images_folder = os.path.join(current_images_folder_base, 'all-apps')
    Path(current_images_folder).mkdir(parents=True, exist_ok=True)
    print('Plotting coverage graphs for all data')
    for column in desired_columns:
        all_df_filtered = all_df[(all_df[column].astype(str) != 'nan')].astype({column: float})
        if not all_df_filtered.empty:
            all_df_filtered.sort_values(by=['Interaction Level', 'Combination'], axis=0, inplace=True)
            fig = px.box(all_df_filtered, x='Interaction Level', y=column, color='Configuration', points=pts,
                         width=width, range_y=rng_y).update_traces(boxmean=True).update_layout(
                font=dict(
                    size=18,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.002,
                    xanchor="right",
                    x=1,
                ),
                legend_title_text='',
            )
            fig.write_image(os.path.join(current_images_folder, f'{snake_case(column)}.png'), format='png')
            print('Written image ' + os.path.join(current_images_folder, f'{snake_case(column)}.png'))
            all_df_filtered.to_csv(os.path.join(images_folder, '..', f'{name_df}_all-apps_{snake_case(column)}.csv'))


def print_all_csv_cumulate(all_csvs, name_csv, columns, images_folder, config):
    """Concatenate per-app result CSVs and plot the cumulative box plots.

    Asserts that every CSV implies the same category columns.
    """
    all_dfs = []
    categories = None
    for all_csv in all_csvs:
        all_df, categories_all = open_csv(all_csv, columns, categorized=True)
        all_dfs.append(all_df)
        if not categories:
            categories = categories_all
        elif categories != categories_all:
            print(f'{categories} (from file {all_csvs[0]}) is different from {categories_all} (from file {all_csv})',
                  file=sys.stderr)
            assert False
    all_df = pd.concat(all_dfs, ignore_index=True)
    print_all_df(all_df, name_csv, images_folder, categories[:-3], config)  # -3 because we add 3 columns after categories


def run_coverage_analysis_separate(app: str, config, categorizer_output):
    """Analyze coverage results for a single app and plot its graphs.

    Builds (or reloads from intermediate CSVs) a DataFrame with one row per
    (time limit, interaction level, combination, trial); merges categorizer
    output when requested; derives efficiency columns; duplicates non-sampled
    rows to align with sampling trials; writes the result CSVs; and finally
    plots per-app graphs when configured to.
    """
    columns = COLUMNS
    time_limits, levels, trials, output_dir, combs = \
        config['general']['time_limits'], config['general']['levels'], config['general']['trials'], \
        config['general']['output_dir'], config['general']['combs']
    precision = config['verbosity']['precision']
    rng_y, plots_per_app, delete_intermediate, analyze_categories, images_dir, force = \
        config['analyze']['rng_y'], config['analyze']['plots_per_app'], \
        config['analyze']['delete_intermediate'], config['analyze']['analyze_categories'], \
        config['analyze']['images_dir'], config['analyze']['force']
    categories = []  # e.g. ['Instruction coverage (0)', 'Instruction coverage (1)', 'Instruction coverage (under)',
    #                        'Instruction coverage (over)', 'Instruction coverage (col)', 'Instruction coverage (total)']
    output_dir_abs = os.path.abspath(output_dir)
    app_dir = os.path.join(output_dir_abs, app)
    if os.path.exists(os.path.join(app_dir, f'{app}_cov_analysis_results.csv')) and not force:
        print(f'Skipped app {app} (results exist)')
        return
    df_apps = []
    df_time_limits = []
    df_int_levels = []
    df_combs = []
    df_trials = []
    df_averaging_criterion = []
    df_inst_cov = []
    df_branch_cov = []
    df_method_cov = []
    df_n_tests = []
    df_ctd_cov = []
    cov_df: pd.DataFrame
    if delete_intermediate:
        def remove_noexcept(filename, verbose=False):
            # Delete a file, tolerating its absence (but nothing else).
            try:
                os.remove(filename)
                if verbose:
                    print(f'Deleted file {os.path.basename(filename)}')
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        remove_noexcept(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate.csv'), True)
        remove_noexcept(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate2.csv'), True)
    if os.path.exists(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate2.csv')):
        cov_df, categories = open_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate2.csv'),
                                      columns, categorized=True)
        print('Loaded DataFrame from saved second intermediate file')
        print(f'Implied categories: {categories}')
    else:
        if os.path.exists(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate.csv')):
            cov_df = open_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate.csv'),
                              columns, categorized=False)
            print('Loaded DataFrame from saved intermediate file')
        else:
            # Scan the per-configuration result directories, e.g.
            # <app>_<time>s_<level>l_<trial>, and collect coverage numbers.
            for app_config in os.listdir(app_dir):
                if '.' in app_config or 'experiment' in app_config:
                    continue
                short_config = app_config[app_config.find(app + '_') + len(app + '_'):]
                time_limit = short_config[:short_config.find('s_')]
                if not int(time_limit) in time_limits:
                    continue
                interaction_level = short_config[short_config.find('s_') + 2:short_config.find('l_')]
                if not int(interaction_level) in levels:
                    continue
                trial = short_config[short_config.rfind('_') + 1:]
                if not int(trial) in trials:
                    continue
                print('App: {} time limit: {}, interaction_level: {}, trial: {}'.format(app, time_limit,
                                                                                       interaction_level, trial))
                reports_dir = os.path.join(app_dir, app_config, app_config + "-tkltest-reports")
                ctd_cov_file = os.path.join(app_dir, app_config, app_config + "_test_generation_summary.json")
                if not os.path.isfile(ctd_cov_file):
                    print("** ERROR: ctd coverage file {} doesn't exist".format(ctd_cov_file))
                    continue
                with open(ctd_cov_file) as f:
                    ctd_data = json.load(f)
                ctd_total_rows = ctd_data['test_plan_coverage_info']['test_plan_rows']
                ctd_orig_cov = round(ctd_data['test_plan_coverage_info']['rows_covered_bb_sequences'] / ctd_total_rows,
                                     2)
                ctd_final_cov = round(ctd_data['test_plan_coverage_info']['rows_covered_full'] / ctd_total_rows, 2)
                if not os.path.isdir(reports_dir):
                    print("** Warning: reports dir {} doesn't exist".format(reports_dir))
                    continue
                for comb_report_dir in os.listdir(os.path.join(reports_dir, 'jacoco-reports')):
                    if comb_report_dir.startswith('.'):
                        continue
                    # Extract the combination name from '<...>_<trial>-<comb>-tests'.
                    comb = comb_report_dir[comb_report_dir.find('_' + str(trial) + '-') + 2 + len(str(trial)):-6]
                    if comb not in combs:
                        continue
                    # comb = comb[:comb.find('sampled') + 7] if 'sampled' in comb else comb # remove sampling trial number
                    coverage_file = os.path.join(reports_dir, 'jacoco-reports',
                                                 comb_report_dir, comb_report_dir + '.csv')
                    if not os.path.isfile(coverage_file):
                        print("** Warning: coverage file {} doesn't exist".format(coverage_file))
                        continue
                    df_apps.append(app)
                    df_time_limits.append(time_limit)
                    df_int_levels.append(interaction_level)
                    df_combs.append(comb)
                    df_trials.append(trial)
                    # Sampling trial numbers are stripped from the averaging key
                    # so all sampling trials of a combination average together.
                    df_averaging_criterion.append((app, interaction_level,
                                                   comb[:comb.find('sampled') + 7] if 'sampled' in comb else comb))
                    inst, branch, method = calc_coverage(coverage_file, precision)
                    df_inst_cov.append(inst)
                    df_branch_cov.append(branch)
                    df_method_cov.append(method)
                    df_n_tests.append(count_test_methods(
                        list(Path(os.path.join(app_dir, app_config, comb_report_dir)).rglob('*.java'))))
                    df_ctd_cov.append(ctd_final_cov if 'ctdamplified' in comb or 'ctd-amplified' in comb
                                      else ctd_orig_cov)
            cov_results = dict()
            cov_results['App'] = df_apps
            cov_results['Time limit'] = df_time_limits
            cov_results['Interaction Level'] = df_int_levels
            cov_results['Combination'] = df_combs
            cov_results['Trial'] = df_trials
            cov_results['Averaging criterion'] = df_averaging_criterion
            cov_results['Instruction coverage'] = df_inst_cov
            cov_results['Branch coverage'] = df_branch_cov
            cov_results['Method coverage'] = df_method_cov
            cov_results['Test methods'] = df_n_tests
            cov_results['CTD coverage'] = df_ctd_cov
            cov_df = pd.DataFrame(cov_results, columns=columns)
            cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate.csv'), mode='w+')
            print('Saved intermediate DataFrame to file')
        # add categorizer output to table
        if analyze_categories and categorizer_output:
            print('Reading from categorizer output')
            categories = categorizer_output[0][3:]
            print(f'Implied categories: {categories}')
            categorizer_output = categorizer_output[1:]
            cov_df[categories] = np.nan
            for cat in categorizer_output:
                # cat is ['app', 'combination', 'level', ('0', '1', 'under', 'over', 'col', 'total') * {1,2,3}]
                (cat_app, cat_comb, cat_level), cat = tuple(cat[:3]), cat[3:]
                for i in range(len(categories)):
                    cov_df.loc[(cov_df['App'] == cat_app) & (cov_df['Combination'] == cat_comb) &
                               (cov_df['Interaction Level'].astype(int) == int(cat_level)), categories[i]] = float(
                        cat[i])
        cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate2.csv'), mode='w+')
        print('Saved second intermediate DataFrame to file')
    # add three more columns
    print('Adding three more columns')
    cov_df['Instruction coverage efficiency'] \
        = np.where(cov_df['Test methods'].astype(float) > 0,
                   cov_df['Instruction coverage'].astype(float) / cov_df['Test methods'].astype(float), 0)
    cov_df['Branch coverage efficiency'] \
        = np.where(cov_df['Test methods'].astype(float) > 0,
                   cov_df['Branch coverage'].astype(float) / cov_df['Test methods'].astype(float), 0)
    cov_df['Configuration'] = cov_df['Combination'].astype(str)
    # prepare columns for samples
    sampled_cov_df = None
    if cov_df['Combination'].astype(str).str.contains('-sampled', regex=True).any():
        print(f'Detected samples -- preparing columns for samples (assuming only trial 0 may be sampled)')
        for level in levels:
            app_df = cov_df[(cov_df['App'] == app) & (cov_df['Interaction Level'].astype(int) == int(level)) & (
                    cov_df['Trial'].astype(int) == 0)]
            ctd_amplified_counts = app_df['Combination'].str.contains('ctd-amplified-sampled',
                                                                      regex=True).value_counts()
            evosuite_counts = app_df['Combination'].str.contains('evosuite-sampled', regex=True).value_counts()
            randoop_counts = app_df['Combination'].str.contains('randoop-sampled', regex=True).value_counts()
            sampled_suites = [True in ctd_amplified_counts, True in evosuite_counts, True in randoop_counts]
            # Exactly two of the three suites must be sampled; the third gets
            # duplicated below so all three have the same number of rows.
            if sampled_suites.count(True) != 2:
                continue
            non_sampled = \
                ['ctd-amplified', 'evosuite', 'randoop'][sampled_suites.index(False)]
            if non_sampled != 'ctd-amplified':
                implied_sampling_trials = ctd_amplified_counts[True]
            else:
                implied_sampling_trials = evosuite_counts[True]
                assert implied_sampling_trials == randoop_counts[True]
            print(f'Duplicating {non_sampled}, {implied_sampling_trials} times (app={app}, level={level}, trial=0)')
            not_sampled_rows = app_df[app_df['Combination'].str.match(f'{non_sampled}$')]
            not_sampled_rows.replace(non_sampled, f'{non_sampled}-sampled', inplace=True)
            # pd.concat replaces DataFrame.append (removed in pandas 2.0);
            # the result is identical.
            cov_df = pd.concat([cov_df] + [not_sampled_rows] * implied_sampling_trials)
        cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_intermediate3.csv'), mode='w+')
        print('Saved non-loadable third intermediate DataFrame to file')
        sampled_cov_df = cov_df[cov_df['Combination'].str.contains('-sampled', regex=True)]
        sampled_cov_df.replace('-sampled(-[0-9]+)?', ' (sampled)', regex=True, inplace=True)
        sampled_cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_sampled.csv'), mode='w+')
    cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results.csv'), mode='w+')
    cov_df = cov_df[~cov_df['Combination'].str.contains('-sampled', regex=True)]
    sing_cov_df = cov_df[cov_df['Combination'].isin(['ctd-amplified', 'evosuite', 'randoop'])]
    sing_cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_singles.csv'), mode='w+')
    comb_cov_df = cov_df[cov_df['Combination'].isin(['ctdamplified-evosuite-randoop', 'evosuite-randoop'])]
    comb_cov_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_combinations.csv'), mode='w+')
    # take mean over all instances of same config (dropping sampling trial number must be done after cat analysis)
    print('Averaging DataFrame values according to averaging criterion')

    def take_avg(col):
        # Mean (stringified) for numeric columns; the unique value for
        # constant non-numeric columns; NaN otherwise.
        # NOTE(review): col.unique() returns a one-element array, not a
        # scalar — confirm downstream consumers expect that.
        return str(col.mean()) if is_numeric_dtype(col) else col.unique() if col.nunique() == 1 else np.nan

    sampled_avg_df = None
    if sampled_cov_df is not None:
        sampled_avg_df = sampled_cov_df.groupby('Averaging criterion').agg(take_avg)
        sampled_avg_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_sampled_averaged.csv'), mode='w+')
    sing_avg_df = sing_cov_df.groupby('Averaging criterion').agg(take_avg)
    sing_avg_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_singles_averaged.csv'), mode='w+')
    comb_avg_df = comb_cov_df.groupby('Averaging criterion').agg(take_avg)
    comb_avg_df.to_csv(os.path.join(app_dir, f'{app}_cov_analysis_results_combinations_averaged.csv'), mode='w+')
    # plot graphs based on raw data
    images_folder = os.path.join(output_dir_abs, images_dir)
    Path(images_folder).mkdir(exist_ok=True)
    avg_dfs = [sing_avg_df, comb_avg_df] + ([sampled_avg_df] if sampled_avg_df is not None else [])
    all_dfs = [(sing_cov_df, 'singles'), (comb_cov_df, 'combinations')] \
        + ([(sampled_cov_df, 'sampled')] if sampled_cov_df is not None else [])
    if plots_per_app:
        for avg_df, (_, name_df) in zip(avg_dfs, all_dfs):
            if avg_df is not None:
                print_app_df([app], avg_df, name_df, images_folder, categories, rng_y)


def run_coverage_analysis(config, categorizer_output):
    """The main entry point for analyzing experiments coverage results.

    This is the main entry point for an experiment coverage analysis. For each app in the experiment,
    each interaction level, each time limit, and each combination of resulting test suites (out of
    randoop, evosuite, and ctd-guided generated test suites), it computes the average instruction, branch
    and method coverage, test suite size (in terms of test methods) and *false* ctd coverage (in terms of rows covered
    out of the ctd test plan)
    """
    for app in config['general']['apps']:
        run_coverage_analysis_separate(app, config, categorizer_output)


def parse_oc_json(file_name, pts, pts2, verbose=False, app=''):
    """Parse one CTD coverage report JSON, appending per-method data points to
    ``pts`` (plot rows) and ``pts2`` (auxiliary per-method rows), in place."""
    with open(file_name) as f:
        oc_methods = json.load(f)
    level = oc_methods['interaction_level']
    oc_methods = oc_methods['ctd_coverage']['monolithic']
    for cls in oc_methods:
        methods = oc_methods[cls]
        for name in methods:
            ctd_cov = float(methods[name]['ctd_coverage'])
            ctd_cov_bb = float(methods[name]['ctd_coverage_bb_sequences'])
            pts.append({'Configuration': 'ctd-amplified', 'Combination': 'ctd-amplified',
                        'Interaction Level': level, 'CTD Coverage': ctd_cov, 'Filename': file_name})
            pts.append({'Configuration': 'evosuite-randoop', 'Combination': 'ctd-amplified',
                        'Interaction Level': level, 'CTD Coverage': ctd_cov_bb, 'Filename': file_name})
            pts2.append({'Class': cls, 'Method': name, 'Interaction Level': level,
                         'Coverage by CTD-Amplified': ctd_cov, 'Coverage by Basic Block': ctd_cov_bb,
                         'App': app, 'Filename': file_name})
            if verbose:
                print(f'OC level:{level} appended {ctd_cov} vs. {ctd_cov_bb}')


def parse_all_oc(oc_apps, time_limit, trial, output_dir, verbose=False):
    """Parse every CTD coverage report under ``output_dir`` (optionally limited
    to ``oc_apps``) for the given time limit and trial; return (pts, pts2)."""
    pts, pts2 = [], []
    if not oc_apps:
        json_glob = Path(output_dir).rglob(f'*/*_{time_limit}s_*l_{trial}/*_ctd_coverage_report.json')
        for file_name in json_glob:
            parse_oc_json(file_name, pts, pts2, verbose)
            if verbose:
                print(f'Finished parsing json file {file_name} for oc')
    else:
        for app in oc_apps:
            app_json_glob = Path(output_dir).rglob(f'{app}/{app}_{time_limit}s_*l_{trial}/*_ctd_coverage_report.json')
            for file_name in app_json_glob:
                parse_oc_json(file_name, pts, pts2, verbose, app)
                if verbose:
                    print(f'Finished parsing json file {file_name} for oc')
    return pts, pts2


def plot_oc(time_limit, trial, config, csv_file=None):
    """Plot the *true* CTD coverage box plot for all apps.

    Data is either parsed from the experiment output tree or, when
    ``csv_file`` is given, re-loaded from a previously written CSV.
    """
    apps, output_dir = config['general']['apps'], config['general']['output_dir']
    verbose = config['verbosity']['verbose']
    images_dir, pts, rng_y = config['analyze']['images_dir'], config['analyze']['pts'], config['analyze']['rng_y']
    if not csv_file:
        # BUGFIX: output_dir was missing from this call, and the result used
        # to shadow the 'pts' plot-points setting passed to px.box below.
        oc_pts, oc_pts2 = parse_all_oc(apps, time_limit, trial, output_dir, verbose)
        pts_df = pd.DataFrame(oc_pts)
        pts2_df = pd.DataFrame(oc_pts2)
        pts2_df.to_csv(os.path.join(output_dir, f'ctd_coverage_analysis_oc2_{time_limit}s_{trial}.csv'))
    else:
        # BUGFIX: the file handle used to be leaked (open without close).
        with open(csv_file) as f:
            pts_df = pd.read_csv(f)
        pts_df.columns = ['0'] + ['Configuration', 'Combination', 'Interaction Level', 'CTD Coverage', 'Filename']
        pts_df.drop('0', axis=1, inplace=True)
    current_images_folder_base = os.path.join(output_dir, images_dir, 'ctd-coverage')
    current_images_folder = os.path.join(current_images_folder_base, 'all-apps')
    Path(current_images_folder).mkdir(parents=True, exist_ok=True)
    pts_df.to_csv(os.path.join(output_dir, f'ctd_coverage_analysis_oc_{time_limit}s_{trial}.csv'))
    # BUGFIX: messages used to name tkl_... while the files written above are ctd_...
    print(f'Written to file {output_dir}/ctd_coverage_analysis_oc_{time_limit}s_{trial}.csv')
    print(f'Written to auxiliary file {output_dir}/ctd_coverage_analysis_oc2_{time_limit}s_{trial}.csv')
    print('Plotting *true* ctd coverage graph for all data')
    if not pts_df.empty:
        column = 'CTD Coverage'
        pts_df.sort_values(by=['Interaction Level', 'Combination'], axis=0, inplace=True)
        fig = px.box(pts_df, x='Interaction Level', y=column, color='Configuration', points=pts,
                     width=720, range_y=rng_y).update_traces(boxmean=True).update_layout(
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1
            ),
        )
        fig.write_image(os.path.join(current_images_folder, f'{snake_case(column, pre="true")}.png'), format='png')
        print('Written image ' + os.path.join(current_images_folder, f'{snake_case(column, pre="true")}.png'))


def parse_exceptions_json(file_name, app, time_limit, level, trial, exceptions_csv, verbose=False):
    """Append one row per execution-failure exception type found in the given
    test-generation summary JSON to ``exceptions_csv``; return it."""
    with open(file_name) as f:
        summary = json.load(f)
    if 'execution_fail_exception_types' not in summary.keys():
        return exceptions_csv
    exceptions = summary['execution_fail_exception_types']
    for exception in exceptions:
        ex_count = exceptions[exception]
        # Keep only the simple class name of the exception.
        exception = str(exception).split()[0].split('.')[-1]
        exceptions_csv.append([app, 'ctd-amplified', time_limit, level, trial, exception, ex_count])
        if verbose:
            print(f'Appended {exception} ({ex_count})')
    return exceptions_csv


def parse_exceptions(time_limit, level, trial, config):
    """Collect exception statistics for all apps with a test-generation
    summary, write raw and sorted CSVs, and plot the histograms."""
    apps, output_dir = config['general']['apps'], config['general']['output_dir']
    verbose = config['verbosity']['verbose']
    from csv import reader, writer
    with open(f'{output_dir}/exceptions_summary.csv', 'r') as read_obj:
        csv_reader = reader(read_obj)
        exceptions_csv = list(csv_reader)
    real_apps = []
    for app in apps:
        if os.path.exists(f'{output_dir}/{app}/{app}_{time_limit}s_{level}l_{trial}/'
                          f'{app}_{time_limit}s_{level}l_{trial}_test_generation_summary.json'):
            real_apps.append(app)
    print(f'Real apps: {real_apps} ({len(real_apps)} out of {len(apps)})')
    for app in real_apps:
        parse_exceptions_json(f'{output_dir}/{app}/{app}_{time_limit}s_{level}l_{trial}/'
                              f'{app}_{time_limit}s_{level}l_{trial}_test_generation_summary.json',
                              app, time_limit, level, trial, exceptions_csv, verbose)
        if verbose:
            print(f'Finished parsing test-gen summary json for app {app} for exceptions')
    with open(f'{output_dir}/tkl_coverage_analysis_ex_{time_limit}s_{level}l_{trial}_raw.csv', 'w') as write_obj:
        csv_writer = writer(write_obj)
        csv_writer.writerows(exceptions_csv)
    print(f'Written to file {output_dir}/tkl_coverage_analysis_ex_{time_limit}s_{level}l_{trial}_raw.csv')
    exceptions_df = pd.DataFrame(exceptions_csv,
                                 columns=['App', 'Configuration', 'Timeout', 'Interaction Level', 'Trial',
                                          'Exception', 'Count'])
    exceptions_df.sort_values(['App', 'Configuration', 'Exception'], inplace=True)
    exceptions_df.to_csv(f'{output_dir}/tkl_coverage_analysis_ex_{time_limit}s_{level}l_{trial}.csv')
    print(f'Written to file {output_dir}/tkl_coverage_analysis_ex_{time_limit}s_{level}l_{trial}.csv')
    # BUGFIX: plot_exceptions takes the whole config as its 4th argument, not
    # output_dir; the old call would crash inside plot_exceptions.
    plot_exceptions(time_limit, level, trial, config, exceptions_df=exceptions_df)


def plot_exceptions(time_limit, level, trial, config, exceptions_df=None, csv_file=None):
    """Plot per-configuration and combined exception histograms.

    Exceptions whose share is below 2% for every configuration are dropped as
    "rare"; names are shortened for display.
    """
    output_dir = config['general']['output_dir']
    images_dir = config['analyze']['images_dir']
    if exceptions_df is None:
        # BUGFIX: the loaded DataFrame used to be discarded, leaving
        # exceptions_df as None and crashing below.
        exceptions_df = open_csv(csv_file,
                                 columns=['App', 'Configuration', 'Timeout', 'Interaction Level', 'Trial',
                                          'Exception', 'Count'],
                                 categorized=False)
    for conf in ['ctd-amplified', 'evosuite', 'randoop']:
        exceptions_df.loc[(exceptions_df['Configuration'] == conf), 'Percentage'] = \
            exceptions_df[(exceptions_df['Configuration'] == conf)]['Count'] / \
            exceptions_df[(exceptions_df['Configuration'] == conf)]['Count'].sum()
    for conf in ['ctd-amplified', 'evosuite', 'randoop']:
        conf_df = exceptions_df[(exceptions_df['Configuration'] == conf) &
                                (exceptions_df['Timeout'].astype(int) == time_limit) &
                                (exceptions_df['Interaction Level'].astype(int) == level) &
                                (exceptions_df['Trial'].astype(int) == trial)].copy()
        if conf_df.empty:
            continue
        conf_df.sort_values('Exception', ascending=False, inplace=True)
        bars = px.bar(conf_df,
                      y='Exception', x='Percentage',
                      title=f'Histogram of exceptions for configuration {conf} and level {level}',
                      orientation='h',
                      width=1080, height=1720).update_layout(
            yaxis=dict(tickmode='linear'),
        )
        bars.write_image(os.path.join(output_dir, images_dir,
                                      f'exceptions_histogram_{conf}_{time_limit}s_{level}l_{trial}.png'),
                         format='png')
        print(f"Written histogram image "
              f"{os.path.join(output_dir, images_dir, f'exceptions_histogram_{conf}_{time_limit}s_{level}l_{trial}.png')}")
    exceptions_df.sort_values(['App', 'Configuration', 'Exception'], inplace=True)
    exceptions_df.sort_values('Exception', ascending=False, inplace=True)
    rare_exceptions = []
    rare_threshold = 0.02
    for exception in list(exceptions_df['Exception']):
        percent = {conf: exceptions_df[(exceptions_df['Exception'] == exception) &
                                       (exceptions_df['Configuration'] == conf)]['Percentage'].sum()
                   for conf in ['ctd-amplified', 'evosuite', 'randoop']}
        if percent['ctd-amplified'] <= rare_threshold and percent['evosuite'] <= rare_threshold \
                and percent['randoop'] <= rare_threshold:
            rare_exceptions.append(exception)
        else:
            # Shorten displayed names: trailing ';' and 'Exception' suffix.
            if exception[-1] == ';':
                exception_new = exception[:-1]
                exceptions_df['Exception'].replace(exception, exception_new, regex=False, inplace=True)
            if exception != 'Exception':
                exception_new = re.sub('Exception', '', exception)
                exceptions_df['Exception'].replace(exception, exception_new, regex=False, inplace=True)
    print('Rare exceptions:', rare_exceptions)
    exceptions_df = exceptions_df[~exceptions_df['Exception'].isin(rare_exceptions)]
    exceptions_df.sort_values(['Exception', 'Configuration'], ascending=[False, True], inplace=True)
    exceptions_df.replace('ctd-amplified', 'ctd-guided', inplace=True)
    bars = px.bar(exceptions_df,
                  y='Exception', x='Percentage', color='Configuration',
                  # title=f'Histogram of exceptions per configuration for level {level}',
                  orientation='h', width=1080, height=1320).update_layout(
        yaxis=dict(tickmode='linear'), yaxis_title=None,
        xaxis=dict(tick0=0, dtick=0.05, tickformat=',.0%'), xaxis_title=None,
        font=dict(size=27),
        legend=dict(orientation="h", yanchor="bottom", y=1.002, xanchor="right", x=1),
        legend_title_text='',
        barmode='group',
    )
    bars.write_image(os.path.join(output_dir, images_dir,
                                  f'exceptions_histogram_all_{time_limit}s_{level}l_{trial}.png'),
                     format='png')
    print(f"Written histogram image "
          f"{os.path.join(output_dir, images_dir, f'exceptions_histogram_all_{time_limit}s_{level}l_{trial}.png')}")


def main_cumulate(config):
    """Cumulate the per-app singles/combinations/sampled result CSVs across
    all configured apps and plot the all-apps graphs."""
    columns = COLUMNS
    output_dir_abs = os.path.abspath(config['general']['output_dir'])
    images_folder = os.path.join(output_dir_abs, config['analyze']['images_dir'])
    for name_csv in ['singles', 'combinations', 'sampled']:
        all_csvs = []
        for app in config['general']['apps']:
            app_dir = os.path.join(output_dir_abs, app)
            all_csvs.append(os.path.join(app_dir, f'{app}_cov_analysis_results_{snake_case(name_csv)}.csv'))
        print_all_csv_cumulate(all_csvs, name_csv, columns, images_folder, config)


if __name__ == '__main__':
    apps = sys.argv[1:]
    with open('parsed_config.toml') as f:
        config = toml.load(f)
    if apps:
        # parse_exceptions(60, 3, 0, config)
        plot_exceptions(60, 3, 0, config,
                        csv_file='/Users/antonio/Desktop/FINAL/exceptions/tkl_coverage_analysis_ex_60s_3l_0.csv')

Full Screen

Full Screen

web.py

Source:web.py Github

copy

Full Screen

...272 }273 )274 @app.route("/exceptions/csv")275 @self.auth_required_if_enabled276 def exceptions_csv():277 data = StringIO()278 writer = csv.writer(data)279 self.stats_csv_writer.exceptions_csv(writer)280 return _download_csv_response(data.getvalue(), "exceptions")281 def start(self):282 self.greenlet = gevent.spawn(self.start_server)283 self.greenlet.link_exception(greenlet_exception_handler)284 def start_server(self):285 if self.tls_cert and self.tls_key:286 self.server = pywsgi.WSGIServer(287 (self.host, self.port), self.app, log=None, keyfile=self.tls_key, certfile=self.tls_cert288 )289 else:290 self.server = pywsgi.WSGIServer((self.host, self.port), self.app, log=None)291 self.server.serve_forever()292 def stop(self):293 """...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run locust automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful