How to use list_metrics method in localstack

Best Python code snippet using localstack_python
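The snippets below are ordinary Python code that happens to use an identifier named list_metrics. If you want to exercise the CloudWatch list_metrics operation against LocalStack itself, a minimal boto3 sketch could look like the following. It assumes LocalStack is running locally with CloudWatch enabled on the default edge port 4566; the namespace, metric name, and dummy credentials are placeholders.

import boto3

# Point the CloudWatch client at the LocalStack edge endpoint (assumed default port 4566)
cloudwatch = boto3.client(
    "cloudwatch",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# Publish a sample metric so there is something to list (names are placeholders)
cloudwatch.put_metric_data(
    Namespace="MyApp",
    MetricData=[{"MetricName": "Latency", "Value": 123.0, "Unit": "Milliseconds"}],
)

# List the metrics known to LocalStack's CloudWatch backend
response = cloudwatch.list_metrics(Namespace="MyApp")
for metric in response["Metrics"]:
    print(metric["Namespace"], metric["MetricName"])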

Metrics.py

Source: Metrics.py (GitHub)


import numpy as np
import pandas as pd
import torch
import sklearn
from sklearn.neighbors import NearestNeighbors


def get_metrics(config, list_metrics, n_neighbors=2):
    """
    Computes the requested metrics for adversarial examples x' generated from original samples x.
    The config is expected to contain Dataset, MaxIters, Alpha, Lambda, TrainData, TestData,
    ValidData, Scaler, FeatureNames, Target, Weights, Bounds, Model.
    :param config: dictionary containing the configuration for the experiment
    :param list_metrics: dictionary indicating which metrics to compute. Choose from
        SuccessRate, iter_means, iter_std, normdelta_median, normdelta_mean,
        n_std, weighted_median, weighted_mean, w_std, mean_dists_at_org,
        median_dists_at_org, mean_dists_at_tgt, mean_dists_at_org_weighted, mdow_std,
        median_dists_at_org_weighted, mean_dists_at_tgt_weighted, mdtw_std,
        prop_same_class_arg_org, prop_same_class_arg_adv
    :param n_neighbors: number of neighbors used when computing the distance to the
        n_neighbors closest neighbors
    """
    metrics_for_conf = []
    df_test = config['TestData']
    dfs_adv = config['AdvData']

    for method, df_adv in dfs_adv.items():
        metrics_for_method = [method]
        # Get success rate before removing samples from the dataframe
        if list_metrics['SuccessRate']:
            sr = metric_success_rate_for(df_adv)
            metrics_for_method.append(sr)
        # Remove samples that did not cross the decision frontier
        df_adv = remove_non_converted(df_adv)
        df_adv = add_normdelta_to(df_adv, config, df_test)
        # Add proportion of neighbors from different classes
        df_adv, df_adv_weighted = add_maj_neighbors(df_adv, df_test, config, n_neighbors=n_neighbors)
        # Mean, std of the number of iterations
        if list_metrics['iter_means']:
            means_iters, stds_iters = mean_norm_for_col(df_adv, col='iters')
            metrics_for_method.append(means_iters)
        if list_metrics['iter_std']:
            metrics_for_method.append(stds_iters)
        # Median norm of the perturbation
        if list_metrics['normdelta_median']:
            median = median_norm_for_col(df_adv, col='normdelta')
            metrics_for_method.append(median)
        # Mean, std of the norm of the perturbation
        if list_metrics['normdelta_mean']:
            means, stds = mean_norm_for_col(df_adv, col='normdelta')
            metrics_for_method.append(means)
        if list_metrics['n_std']:
            metrics_for_method.append(stds)
        # Median norm of the perturbation, weighted
        if list_metrics['weighted_median']:
            median_w = median_norm_for_col(df_adv, col='normdelta_weighted')
            metrics_for_method.append(median_w)
        # Mean, std of the norm of the perturbation, weighted
        if list_metrics['weighted_mean']:
            means_w, stds_w = mean_norm_for_col(df_adv, col='normdelta_weighted')
            metrics_for_method.append(means_w)
        if list_metrics['w_std']:
            metrics_for_method.append(stds_w)
        # Mean, std of the distance to neighbors at the original sample
        if list_metrics['mean_dists_at_org']:
            mean, std = mean_norm_for_col(df_adv, col='mean_dists_at_org')
            metrics_for_method.append(mean)
        # Median of the distance to neighbors at the original sample
        if list_metrics['median_dists_at_org']:
            med = median_norm_for_col(df_adv, col='mean_dists_at_org')
            metrics_for_method.append(med)
        # Mean, std of the distance to neighbors at the perturbed sample
        if list_metrics['mean_dists_at_tgt']:
            mean, std = mean_norm_for_col(df_adv, col='mean_dists_at_tgt')
            metrics_for_method.append(mean)
        # Mean, std of the weighted distance to neighbors at the original sample
        if list_metrics['mean_dists_at_org_weighted']:
            mean, std = mean_norm_for_col(df_adv_weighted, col='mean_dists_at_org')
            metrics_for_method.append(mean)
        if list_metrics['mdow_std']:
            metrics_for_method.append(std)
        # Median of the weighted distance to neighbors at the original sample
        if list_metrics['median_dists_at_org_weighted']:
            median = median_norm_for_col(df_adv_weighted, col='mean_dists_at_org')
            metrics_for_method.append(median)
        # Mean, std of the weighted distance to neighbors at the perturbed sample
        if list_metrics['mean_dists_at_tgt_weighted']:
            mean, std = mean_norm_for_col(df_adv_weighted, col='mean_dists_at_tgt')
            metrics_for_method.append(mean)
        if list_metrics['mdtw_std']:
            metrics_for_method.append(std)
        # Proportion of neighbors sharing the original prediction's class
        if list_metrics['prop_same_class_arg_org']:
            mean, std = mean_norm_for_col(df_adv, col='prop_same_class_arg_org')
            metrics_for_method.append(mean)
        # Proportion of neighbors sharing the adversarial prediction's class
        if list_metrics['prop_same_class_arg_adv']:
            mean, std = mean_norm_for_col(df_adv, col='prop_same_class_arg_adv')
            metrics_for_method.append(mean)

        metrics_for_conf.append(metrics_for_method)
    return metrics_for_conf


def metric_success_rate_for(df):
    return len(df[df['orig_pred'] != df['adv_pred']]) / df.shape[0]


def remove_non_converted(df):
    # Keep only the samples whose prediction changed (successful adversarial examples)
    return df[df['orig_pred'] != df['adv_pred']]


def mean_norm_for_col(df, col):
    tmp = df[col]
    mean, std = np.mean(tmp), np.std(tmp)
    return (mean, std)


def median_norm_for_col(df, col):
    tmp = df[col]
    median = np.median(tmp)
    return median


def add_normdelta_to(df_adv, conf, df):
    # Drop the columns if they are already there
    df_return = df_adv.copy()
    if 'normdelta' in df_return.columns:
        df_return = df_return.drop(columns='normdelta')
    if 'normdelta_weighted' in df_return.columns:
        df_return = df_return.drop(columns='normdelta_weighted')

    feature_names = conf['FeatureNames']
    weights = conf['Weights']
    norms = []
    norms_weighted = []

    # Iterate over all rows
    for index, row in df_return.iterrows():
        orig = df.loc[index][feature_names].values
        adv = row[feature_names].values

        # Compute deltas
        delta = np.abs(orig - adv)
        assert(len(delta) == len(weights))

        # Norm of delta
        norms.append(np.linalg.norm(delta))

        # Norm of weighted delta
        norms_weighted.append(np.linalg.norm(delta * weights))
    df_return.insert(0, 'normdelta', norms)
    df_return.insert(0, 'normdelta_weighted', norms_weighted)

    return df_return


def get_majority_neighbors(df_adv, df_orig, conf, knn, n_neighbors):
    # First list is for the original sample, second for the adversarial sample
    mean_dists = [[], []]
    prop_same_class = [[], []]

    feature_names = conf['FeatureNames']
    target = conf['Target']

    # For each sample
    for index, row in df_adv.iterrows():
        orig = df_orig.loc[index][feature_names].values
        adv = row[feature_names].values

        preds = [row['orig_pred'], row['adv_pred']]
        samples = [orig, adv]

        for i in range(len(preds)):
            sample = samples[i]
            pred = preds[i]

            distance, neighbors_idxs = knn.kneighbors([sample], n_neighbors)
            neighbors_samples = df_orig.iloc[neighbors_idxs[0]]

            # Discard the closest neighbor (the query point itself)
            distance = [distance[0][1:]]
            neighbors_idxs = [neighbors_idxs[0][1:]]

            # Mean distance to the closest neighbors
            if len(distance[0]) > 0:
                dst_mean = np.mean(distance[0])
            else:
                print('Error, no neighbor found')
                dst_mean = np.nan
            mean_dists[i].append(dst_mean)

            neighbors_pts_target = np.array(neighbors_samples[target]).astype(int)
            prop = list(neighbors_pts_target).count(pred)
            prop_same_class[i].append(float(prop) / float(n_neighbors))

    return mean_dists, prop_same_class


def add_maj_neighbors_to(df_adv, df_orig, conf, knn, n_neighbors):
    df_return = df_adv.copy()

    if 'mean_dists_at_org' in df_return.columns:
        df_return = df_return.drop(columns='mean_dists_at_org')
    if 'mean_dists_at_tgt' in df_return.columns:
        df_return = df_return.drop(columns='mean_dists_at_tgt')
    if 'prop_same_class_arg_org' in df_return.columns:
        df_return = df_return.drop(columns='prop_same_class_arg_org')
    if 'prop_same_class_arg_adv' in df_return.columns:
        df_return = df_return.drop(columns='prop_same_class_arg_adv')

    mean_dists, prop_same_class = get_majority_neighbors(df_adv, df_orig, conf, knn, n_neighbors)

    df_return.insert(0, 'mean_dists_at_org', mean_dists[0])
    df_return.insert(0, 'mean_dists_at_tgt', mean_dists[1])
    df_return.insert(0, 'prop_same_class_arg_org', prop_same_class[0])
    df_return.insert(0, 'prop_same_class_arg_adv', prop_same_class[1])

    return df_return


def scale_data(conf, df_orig):
    # Assumes the weights and bounds come from the experiment config (see get_metrics docstring);
    # df, weights, and bounds were left undefined in the original snippet.
    df = df_orig.copy()
    weights = conf['Weights']
    bounds = conf['Bounds']
    print('Before')
    print(df.describe(include='all'))
    print(weights)
    for col, weight in zip(list(df.columns), weights):
        df[col] = df[col].apply(lambda x: x * weight)

    bounds = [[bounds[i][x] * weight for x, weight in enumerate(weights)] for i in range(len(bounds))]
    print(df.describe(include='all'))
    return df, bounds


def weighted_distance(x, y, w):
    sum_ = 0
    assert(len(x) == len(y) == len(w))
    for i in range(len(x)):
        sum_ += (w[i] * (y[i] - x[i])) ** 2
    sum_ = np.sqrt(sum_)
    return sum_


def add_maj_neighbors(df_adv, df_orig, conf, n_neighbors):
    # Reset the indexes, otherwise the KNN returns positional indexes in range(len(df))
    # instead of the real indexes of the samples
    df_adv = df_adv.reset_index().drop(columns=['index'])
    df_orig = df_orig.reset_index().drop(columns=['index'])
    weights = conf['Weights']
    assert(weights[0] > 0)
    feature_names = conf['FeatureNames']
    target = conf['Target']

    knn = NearestNeighbors(n_neighbors=n_neighbors, metric='l2')
    knn.fit(df_orig[feature_names], df_orig[target])

    knn_weighted = NearestNeighbors(n_neighbors=n_neighbors, metric=weighted_distance, metric_params={'w': weights})
    knn_weighted.fit(df_orig[feature_names], df_orig[target])

    df_adv_return = add_maj_neighbors_to(df_adv, df_orig, conf, knn, n_neighbors)
    df_adv_weighted = add_maj_neighbors_to(df_adv, df_orig, conf, knn_weighted, n_neighbors)
    ...
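A hypothetical way to call get_metrics, based on the keys and column names used in the snippet above. The dataframes (df_test, df_adv_fgsm), feature_names, weights, the target column 'label', and the 'FGSM' method name are placeholders you would substitute with your own data.

# Placeholder configuration; AdvData maps each attack method name to a dataframe that
# contains the feature columns plus 'orig_pred', 'adv_pred', and 'iters'.
config = {
    'TestData': df_test,
    'AdvData': {'FGSM': df_adv_fgsm},
    'FeatureNames': feature_names,
    'Target': 'label',
    'Weights': weights,          # one positive weight per feature
}

# Start with every metric disabled, then switch on the ones you want
list_metrics = {name: False for name in [
    'SuccessRate', 'iter_means', 'iter_std', 'normdelta_median', 'normdelta_mean',
    'n_std', 'weighted_median', 'weighted_mean', 'w_std', 'mean_dists_at_org',
    'median_dists_at_org', 'mean_dists_at_tgt', 'mean_dists_at_org_weighted', 'mdow_std',
    'median_dists_at_org_weighted', 'mean_dists_at_tgt_weighted', 'mdtw_std',
    'prop_same_class_arg_org', 'prop_same_class_arg_adv']}
list_metrics['SuccessRate'] = True
list_metrics['normdelta_mean'] = True

rows = get_metrics(config, list_metrics, n_neighbors=3)  # one row of metric values per method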


graph.py

Source: graph.py (GitHub)


import os, copy
import modules.node
import modules.extra
# Note: this excerpt also references `metric` and `graph_bipartite`, which are imported
# elsewhere in the project.


class Graph(object):
    def __init__(self, graphfile):
        self.graphfile = graphfile
        self.nodes = {}

        self.n = 0
        self.m = 0
        self.density = 0.0

        self.list_local_degree = {}
        self.list_local_cc = {}
        self.list_local_rc = {}
        self.list_metrics = {}
        self.list_correlations = {}

    def compute_metrics(self):
        self.__compute_degree_informations()
        self.__compute_cc_informations()
        self.__compute_rc_informations()

    def create_metrics(self):
        self.list_metrics["degree"] = metric.Metric(self, "degree", self.list_local_degree)
        self.list_metrics["degree"].compile()
        # self.list_metrics["degree_norm"] = self.list_metrics["degree"].normalization()
        self.list_metrics["cc"] = metric.Metric(self, "cc", self.list_local_cc)
        self.list_metrics["cc"].compile()
        self.list_metrics["rc"] = metric.Metric(self, "rc", self.list_local_rc)
        self.list_metrics["rc"].compile()

    def treat_correlations(self):
        for metric1 in self.list_metrics:
            for metric2 in self.list_metrics:
                if metric1 != metric2:
                    name_correlation = "%s-%s" % (metric1, metric2)
                    self.list_correlations[name_correlation] = metric.Correlation(self, self.list_metrics[metric1], self.list_metrics[metric2])
                    self.list_correlations[name_correlation].compile()

    def __compute_degree_informations(self):
        for id_node in self.nodes:
            degree = self.nodes[id_node].degree
            self.list_local_degree[id_node] = degree
            self.m += degree
        self.m /= 2
        self.average_degree = (2 * self.m) / float(self.n)
        self.density = (2 * self.m) / float(self.n * (self.n - 1))

    def __compute_cc_informations(self):
        for id_node in self.nodes:
            self.list_local_cc[id_node] = -1.0
            k = self.nodes[id_node].degree * (self.nodes[id_node].degree - 1)
            # ALTERNATIVE METHOD (deprecated, ignores degree-1 nodes)
            '''k = 0
            for id_neighbour in self.nodes[id_node].list_neighbours:
                if self.nodes[id_neighbour].degree > 1:
                    k += 1
            k = k * (k - 1)'''
            v = 0
            for id_neighbour1 in self.nodes[id_node].list_neighbours:
                for id_neighbour2 in self.nodes[id_node].list_neighbours:
                    if id_neighbour1 != id_neighbour2:
                        if id_neighbour2 in self.nodes[id_neighbour1].list_neighbours:
                            v += 1
            if k != 0 and v != 0:
                self.list_local_cc[id_node] = v / float(k)

    def __compute_rc_informations(self):
        for id_node in self.nodes:
            self.list_local_rc[id_node] = -1.0
            k = self.nodes[id_node].degree * (self.nodes[id_node].degree - 1)
            # ALTERNATIVE METHOD (deprecated, ignores degree-1 nodes)
            '''k = 0
            for id_neighbour in self.nodes[id_node].list_neighbours:
                if self.nodes[id_neighbour].degree > 1:
                    k += 1
            k = k * (k - 1)'''
            v = 0

            for id_neighbour1 in self.nodes[id_node].list_neighbours:
                for id_neighbour2 in self.nodes[id_node].list_neighbours:
                    if id_neighbour2 != id_neighbour1:
                        for id_neighbour_id1 in self.nodes[id_neighbour1].list_neighbours:
                            for id_neighbour_id2 in self.nodes[id_neighbour2].list_neighbours:
                                if id_neighbour_id1 != id_node and id_neighbour_id1 == id_neighbour_id2:
                                    v += 0.5
                                    break
                            else:
                                continue
                            break

            if k != 0 and v != 0:
                self.list_local_rc[id_node] = (2 * v) / float(k)

    def load(self):
        file = open(self.graphfile, 'r')
        # To improve
        for line in file.read().splitlines():
            nodes_id = line.split()
            if nodes_id[0] not in self.nodes:
                self.nodes[nodes_id[0]] = modules.node.Node(nodes_id[0])
            for i in range(1, len(nodes_id)):
                self.nodes[nodes_id[0]].add_neighbour(nodes_id[i])
        file.close()
        self.n = len(self.nodes)

    def informations(self):
        # self.name is expected to be set elsewhere; it is not initialized in this excerpt
        info = "\t\t#### [Statistics of graph (%s)] ####\n\n" % self.name
        info += "- GLOBAL STATS :\n\n"
        info += "\t# Size (n) = %d\n" % self.n
        info += "\t# Number of links (m) = %d\n" % self.m
        info += "\t# Density = %0.6f\n" % self.density
        info += "\n- LOCAL METRICS :\n\n"
        for metric_name in self.list_metrics:
            info += str(self.list_metrics[metric_name])
        return info

    def informations_correlations(self):
        info = "\n\t ---- Correlations ---- \n"
        for correlation_name in self.list_correlations:
            info += str(self.list_correlations[correlation_name])
        return info

    def save_local_informations(self, filename):
        f = open(filename, 'w')
        com = "#ID\tDegree\tClustering coefficient\tRedundancy coefficient"
        if isinstance(self, graph_bipartite.Bipartite):
            com += "\tBipartite TOP(0)/BOT(1) part"
        com += "\n"
        f.write(com)
        for id_node in self.nodes:
            line = "%s\t%d\t%0.6f\t%0.6f" % (id_node, self.list_local_degree[id_node], self.list_local_cc[id_node], self.list_local_rc[id_node])
            if isinstance(self, graph_bipartite.Bipartite):
                if id_node in self.list_top_nodes:
                    line += "\t0"
                else:
                    line += "\t1"
            line += "\n"
            f.write(line)
        f.close()

    def save_metrics(self, directory_data):
        if not os.path.isdir(directory_data):
            os.mkdir(directory_data)
        for metric_name in self.list_metrics:
            self.list_metrics[metric_name].save(directory_data)

    def save_correlations(self, directory_data):
        directory_correlation = directory_data + "/correlations"
        if not os.path.isdir(directory_correlation):
            os.mkdir(directory_correlation)
        for correlation_name in self.list_correlations:
            self.list_correlations[correlation_name].save(directory_correlation)

    def __str__(self):
        ...
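A hypothetical driver for the Graph class above, assuming an adjacency-list input file where each line reads "node_id neighbour1 neighbour2 ..." (the format that load() parses). The file name, graph name, and output directory are placeholders; note that informations() formats a name attribute that this excerpt does not initialize.

# Build the graph from a placeholder file and compute the local metrics
g = Graph("example.graph")
g.name = "example"            # informations() expects self.name, which __init__ above does not set
g.load()                      # read nodes and neighbours from the file
g.compute_metrics()           # degree, clustering, and redundancy coefficients
g.create_metrics()            # wrap them in metric.Metric objects
g.treat_correlations()        # pairwise correlations between the metrics

print(g.informations())
g.save_metrics("output_data")
g.save_correlations("output_data")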


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
