How to use the create_metrics_plot method in autotest

Best Python code snippets using autotest_python

create_graphs.py

Source: create_graphs.py (GitHub)


...
# (snippet truncated above; usage implies: import numpy as np, pandas as pd, seaborn as sns)
from matplotlib import pyplot as plt

current_folder = '/home/2014-0353_generaleye/Huy/YOLOv3/Main/Visualizations/'
input_file1 = '/home/2014-0353_generaleye/Huy/YOLOv3/Main/scores_all.xlsx'
input_file2 = 'PR_all.xlsx'


def create_metrics_plot(df_plot):
    # One facet per metric, one line per model, score plotted against the threshold
    with sns.plotting_context('paper', font_scale=1.3):
        g = sns.FacetGrid(df_plot, col='Metrics', hue='Model', col_wrap=2,
                          palette='tab10', height=5, aspect=1.5, despine=False)
        g = g.map(sns.lineplot, 'Threshold', 'Score')
        g.set_titles(row_template='{row_name}', col_template='{col_name}')
        g.add_legend(frameon=True)

    return g


def create_PR_curve(df_plot, df_F1_model):
    # list_f1 and recall_cutoff are globals defined in the truncated part of the script
    with sns.plotting_context('paper', font_scale=1.3):
        g = sns.FacetGrid(df_plot, col='Threshold', hue='Model', col_wrap=2,
                          palette='tab10', height=5, aspect=1, despine=False)
        # Draw grey iso-F1 reference curves and label each one at the recall cutoff
        for i in range(len(list_f1)):
            g = g.map(sns.lineplot, 'Recall', df_plot.columns[4+i], color='grey', alpha=0.1)
            txt_loc = (recall_cutoff-0.01,
                       df_plot.loc[df_plot['Recall'] > recall_cutoff-0.01, df_plot.columns[4+i]].unique()+0.02)
            for ax in g.axes.ravel():
                ax.text(*txt_loc, df_plot.columns[4+i], horizontalalignment='right',
                        size='small', color='grey', alpha=0.7)
        g = g.map(sns.lineplot, 'Recall', 'Precision')
        # 11-point interpolated average precision per model and threshold
        df_AP = df_plot.groupby(['Model', 'Threshold']).agg(
            {'Precision': lambda x: np.nansum(x[0:101:10])/11}).reset_index()
        df_AP = df_AP.merge(df_F1_model, on=['Model', 'Threshold'])
        df_AP['Precision'] = df_AP['Precision'].round(2)
        # removeprefix instead of lstrip: lstrip strips a character set, not a prefix
        df_AP['Model'] = pd.Categorical(df_AP['Model'],
                                        [x.removeprefix('Model_') for x in list_models2])
        df_AP = df_AP.sort_values('Model')
        df_AP['Model'] = df_AP['Model'].replace({'COCO_Lab': 'COCO', 'Synth_Lab': 'Synth',
                                                 'Synth_DA_Lab': 'Synth_DA'})
        for ax, threshold in zip(g.axes.ravel(), df_plot['Threshold'].unique()):
            df_temp = df_AP.loc[df_AP['Threshold'] == threshold, ['Model', 'Precision', 'F1']]
            loc_legend = 'upper right' if threshold >= 0.9 else 'lower left'
            ax.legend(loc=loc_legend,
                      handles=ax.lines[-4:],
                      labels=['{}'.format(df_temp.iloc[i, 0]) for i in range(len(df_temp))])
        g.set(ylim=(0, 1.05), xlim=(0, recall_cutoff))
        g.set_titles(row_template='{row_name}', col_template='PR-Curve (IOU>0.5)')

    return g


if __name__ == '__main__':

    # INITIALIZE DATA SCORES
    data1 = pd.read_excel(input_file1, sheet_name=None)
    list_models = [model for model in list(data1) if 'Model' in model]

    # APPEND DATAFRAMES OF EACH MODEL
    df_plot = pd.DataFrame()
    for model in list_models:
        df_model = data1[model].iloc[:, :-1]
        df_pivot = df_model.melt(id_vars=['Threshold'], var_name='Metrics', value_name='Score')
        df_pivot['Model'] = model.removeprefix('Model_')
        # Move the Model column to the front
        df_pivot = df_pivot[[df_pivot.columns[-1], *df_pivot.columns[:-1]]]
        # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
        df_plot = pd.concat([df_plot, df_pivot], ignore_index=True)

    # CREATE METRICS PLOT
    plot_metrics = create_metrics_plot(df_plot)
    plot_metrics.savefig(current_folder + 'Plot_metrics.png')

    # INITIALIZE PR DATA
    data2 = pd.read_excel(current_folder + input_file2, sheet_name=None)

    # APPEND DATAFRAMES OF EACH MODEL
    list_output = {}
    # Use a list, not a set, for columns: a set has no guaranteed order
    df_F1_model = pd.DataFrame(columns=['Model', 'Threshold', 'F1'])
    list_models2 = [model for model in list(data2)]
    for model in list_models2:
        df_model = data2[model]

        # F1 is the harmonic mean of precision and recall (filled with 0 where both are 0)
        df_model['F1'] = 2*df_model['Recall']*df_model['Precision']/(df_model['Precision'] + df_model['Recall'])
        df_model['F1'] = df_model['F1'].fillna(0)
        df_model['Model'] = model.split('Model_')[-1]
        df_agg = df_model.groupby(['Model', 'Threshold']).agg({'F1': lambda x: np.max(x)}).reset_index()
...
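If you want to try this create_metrics_plot in isolation, a minimal sketch like the one below works, assuming the function above (and its numpy/pandas/seaborn imports) is in scope. The column names match what the script builds from scores_all.xlsx; the toy scores themselves are invented for illustration:

import numpy as np
import pandas as pd

# Toy long-format frame with the columns create_metrics_plot expects:
# one row per (Model, Metrics, Threshold) combination.
rng = np.random.default_rng(0)
rows = []
for model in ['COCO', 'Synth']:
    for metric in ['Precision', 'Recall']:
        for threshold in np.linspace(0.1, 0.9, 9):
            rows.append({'Model': model, 'Metrics': metric,
                         'Threshold': threshold,
                         'Score': rng.uniform(0.5, 1.0)})
df_demo = pd.DataFrame(rows)

g = create_metrics_plot(df_demo)    # returns a seaborn FacetGrid
g.savefig('Plot_metrics_demo.png')  # FacetGrid.savefig writes the whole grid

Because the function facets on the Metrics column and colors by Model, any number of metrics or models can be added to the frame without touching the plotting code.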


test_model_performance.py

Source: test_model_performance.py (GitHub)


...
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")

# Important to set seed for reproducibility
set_random_seed(42)


def create_metrics_plot(metrics):
    # Plotly params
    prediction_color = "#2d92ff"
    actual_color = "black"
    line_width = 2
    marker_size = 4
    xaxis_args = {"showline": True, "mirror": True, "linewidth": 1.5, "showgrid": False}
    yaxis_args = {"showline": True, "mirror": True, "linewidth": 1.5, "showgrid": False, "rangemode": "tozero"}
    layout_args = {
        "autosize": True,
        "template": "plotly_white",
        "margin": go.layout.Margin(l=0, r=10, b=0, t=30, pad=0),
        "font": dict(size=10),
        "title": dict(font=dict(size=10)),
        "width": 1000,
        "height": 200,
    }
    # One subplot per training metric; validation columns and RegLoss are
    # overlaid on existing panels rather than given panels of their own
    metric_cols = [col for col in metrics.columns if not ("_val" in col or col == "RegLoss")]
    fig = make_subplots(rows=1, cols=len(metric_cols), subplot_titles=metric_cols)
    for i, metric in enumerate(metric_cols):
        fig.add_trace(
            go.Scatter(
                y=metrics[metric],
                name=metric,
                mode="lines",
                line=dict(color=prediction_color, width=line_width),
                legendgroup=metric,
            ),
            row=1,
            col=i + 1,
        )
        # Overlay the validation curve for the same metric, if it was logged
        if f"{metric}_val" in metrics.columns:
            fig.add_trace(
                go.Scatter(
                    y=metrics[f"{metric}_val"],
                    name=f"{metric}_val",
                    mode="lines",
                    line=dict(color=actual_color, width=line_width),
                    legendgroup=metric,
                ),
                row=1,
                col=i + 1,
            )
        # The regularization loss shares the Loss panel
        if metric == "Loss":
            fig.add_trace(
                go.Scatter(
                    y=metrics["RegLoss"],
                    name="RegLoss",
                    mode="lines",
                    line=dict(color=actual_color, width=line_width),
                    legendgroup=metric,
                ),
                row=1,
                col=i + 1,
            )
    fig.update_xaxes(xaxis_args)
    fig.update_yaxes(yaxis_args)
    fig.update_layout(layout_args)
    return fig


def test_PeytonManning():
    df = pd.read_csv(PEYTON_FILE)
    m = NeuralProphet()
    df_train, df_test = m.split_df(df=df, freq="D", valid_p=0.2)
    metrics = m.fit(df_train, validation_df=df_test, freq="D")
    accuracy_metrics = metrics.to_dict("records")[-1]
    with open(os.path.join(DIR, "tests", "metrics", "PeytonManning.json"), "w") as outfile:
        json.dump(accuracy_metrics, outfile)
    create_metrics_plot(metrics).write_image(os.path.join(DIR, "tests", "metrics", "PeytonManning.svg"))


def test_YosemiteTemps():
    df = pd.read_csv(YOS_FILE)
    m = NeuralProphet(
        n_lags=24,
        n_forecasts=24,
        changepoints_range=0.95,
        n_changepoints=30,
        weekly_seasonality=False,
    )
    df_train, df_test = m.split_df(df=df, freq="5min", valid_p=0.2)
    metrics = m.fit(df_train, validation_df=df_test, freq="5min")
    accuracy_metrics = metrics.to_dict("records")[-1]
    with open(os.path.join(DIR, "tests", "metrics", "YosemiteTemps.json"), "w") as outfile:
        json.dump(accuracy_metrics, outfile)
    create_metrics_plot(metrics).write_image(os.path.join(DIR, "tests", "metrics", "YosemiteTemps.svg"))


def test_AirPassengers():
    df = pd.read_csv(AIR_FILE)
    m = NeuralProphet(seasonality_mode="multiplicative")
    df_train, df_test = m.split_df(df=df, freq="MS", valid_p=0.2)
    metrics = m.fit(df_train, validation_df=df_test, freq="MS")
    accuracy_metrics = metrics.to_dict("records")[-1]
    with open(os.path.join(DIR, "tests", "metrics", "AirPassengers.json"), "w") as outfile:
        json.dump(accuracy_metrics, outfile)
...
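Because this create_metrics_plot only reads columns off the metrics DataFrame, you can smoke-test it without fitting a NeuralProphet model at all. A minimal sketch, assuming the function definition above has been pasted into the same session; the column names mirror NeuralProphet's metrics output, but the values below are invented:

import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Fake training history: decaying losses plus a flat regularization term.
epochs = np.arange(50)
metrics = pd.DataFrame({
    "Loss": np.exp(-epochs / 20),             # training loss
    "Loss_val": np.exp(-epochs / 25) + 0.05,  # validation loss, slightly higher
    "RegLoss": np.zeros_like(epochs, dtype=float),
    "MAE": 10 * np.exp(-epochs / 15),
})

fig = create_metrics_plot(metrics)
fig.write_image("metrics_demo.svg")  # static export requires the kaleido package

With these columns, the function builds two panels (Loss and MAE), overlays Loss_val and RegLoss on the Loss panel, and leaves the MAE panel as a single trace, which is exactly the filtering the metric_cols comprehension performs.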


run_process_metrics.py

Source: run_process_metrics.py (GitHub)


...
    png_filename = f"{filename_without_extension}.png"
    test_metrics = load_test_metrics_json(filename=json_filename)
    compare_test_metrics(error_on_diff=False, error_on_miss=False, test_metrics=test_metrics)
    metrics_dict = compute_metrics(test_metrics=test_metrics, interactive=False)
    create_metrics_plot(metrics_dict, filename=png_filename)
    logging.debug("Wrote {0}".format(png_filename))


if __name__ == "__main__":
...
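The helpers here (load_test_metrics_json, compare_test_metrics, compute_metrics, and this project's create_metrics_plot) come from the surrounding codebase, so a standalone run needs them importable. The hypothetical driver sketch below only demonstrates the filename derivation and logging pattern; the commented line marks where the project-specific calls would go:

import glob
import logging
import os

logging.basicConfig(level=logging.DEBUG)

for json_filename in glob.glob(os.path.join("results", "*.json")):
    # Derive the output name by swapping the .json extension for .png
    filename_without_extension = os.path.splitext(json_filename)[0]
    png_filename = f"{filename_without_extension}.png"
    # ...call load_test_metrics_json / compute_metrics / create_metrics_plot here...
    logging.debug("Wrote {0}".format(png_filename))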


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
