How to use the test_outer method in hypothesis

Best Python code snippet using hypothesis
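None of the snippets below imports hypothesis directly: test_outer shows up as the SphericalTensor returned by rep_outer in the torch_gauge tests, and as the held-out index array of an outer cross-validation loop in the scikit-learn helpers. As a starting point, here is a minimal, hypothetical sketch of what a property-based test_outer could look like when written with hypothesis; the test name, the strategies, and the use of numpy.outer are illustrative assumptions, not taken from any of the files listed below.

# Minimal sketch of a property-based test_outer written with hypothesis.
# The shape and element-wise checks mirror the torch.outer assertions in the
# snippets below; the strategies and bounds are illustrative assumptions.
import numpy as np
from hypothesis import given, strategies as st

floats = st.floats(min_value=-10, max_value=10, allow_nan=False, allow_infinity=False)

@given(st.lists(floats, min_size=1, max_size=8), st.lists(floats, min_size=1, max_size=8))
def test_outer(xs, ys):
    a, b = np.array(xs), np.array(ys)
    result = np.outer(a, b)
    # The outer product of an m-vector and an n-vector is an m-by-n matrix ...
    assert result.shape == (len(xs), len(ys))
    # ... whose (i, j) entry is the product of the i-th and j-th inputs.
    for i in range(len(xs)):
        for j in range(len(ys)):
            assert result[i, j] == a[i] * b[j]

Because hypothesis generates many input pairs per run, the shape and element-wise properties are exercised over a range of vector lengths rather than a single hard-coded case.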

test_spherical_tensor.py

Source: test_spherical_tensor.py (GitHub)


import torch
from torch_gauge.o3 import O3Tensor, SphericalTensor

def test_spherical_tensor_creation1d():
    dten = torch.rand(4, 6, 12, 101, 7)
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    SphericalTensor(dten, (3,), metadata)
    return 0

def test_spherical_tensor_creation2d():
    dten = torch.rand(4, 6, 12, 101, 7)
    metadata = torch.LongTensor([[1, 2, 1, 0, 0], [7, 9, 1, 5, 3]])
    SphericalTensor(dten, (2, 3), metadata)
    return 0

def test_spherical_tensor_layout1d():
    dten = torch.rand(4, 19)
    metadata = torch.LongTensor([[1, 2, 1, 1]])
    test_sp_ten = SphericalTensor(dten, (1,), metadata)
    assert test_sp_ten.num_channels == (5,)
    assert torch.all(
        test_sp_ten.rep_layout[0][0].eq(
            torch.LongTensor([0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3])
        )
    )
    assert torch.all(
        test_sp_ten.rep_layout[0][1].eq(
            torch.LongTensor(
                [0, -1, -1, 0, 0, 1, 1, -2, -1, 0, 1, 2, -3, -2, -1, 0, 1, 2, 3]
            )
        )
    )
    assert torch.all(
        test_sp_ten.rep_layout[0][2].eq(
            torch.LongTensor([0, 1, 2, 1, 2, 1, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4])
        )
    )
    return 0

def test_spherical_tensor_scalar_product_1d():
    dten = torch.tensor(
        [
            [1.1, 2.2, 0.5, 0.6, -0.6, 0.9, 0.7, 0.3, 0.1, 0.2, 0.3, 0.4, 0.5],
            [0.0, 0.0, 0.2, 0.0, 0.2, 0.0, 0.3, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]
    )
    metadata = torch.LongTensor([[2, 2, 1]])
    test_sp_ten = SphericalTensor(dten, (1,), metadata)
    scale_ten = torch.tensor([[2.0, 0.5, 3.0, 4.0, 1.0], [101.0, 7.0, 8.0, 82.0, 4.0]])
    dten_outplace = test_sp_ten.scalar_mul(scale_ten)
    assert torch.allclose(
        dten_outplace.ten,
        torch.tensor(
            [
                [2.2, 1.1, 1.5, 2.4, -1.8, 3.6, 2.1, 1.2, 0.1, 0.2, 0.3, 0.4, 0.5],
                [0.0, 0.0, 1.6, 0.0, 1.6, 0.0, 2.4, 0.0, 4.0, 4.0, 4.0, 4.0, 4.0],
            ]
        ),
        atol=1e-10,
        rtol=1e-7,
    )
    assert torch.all(dten_outplace.rep_layout[0].eq(test_sp_ten.rep_layout[0]))
    dten_inplace = test_sp_ten.scalar_mul(scale_ten, inplace=True)
    assert torch.allclose(
        dten_inplace.ten,
        torch.tensor(
            [
                [2.2, 1.1, 1.5, 2.4, -1.8, 3.6, 2.1, 1.2, 0.1, 0.2, 0.3, 0.4, 0.5],
                [0.0, 0.0, 1.6, 0.0, 1.6, 0.0, 2.4, 0.0, 4.0, 4.0, 4.0, 4.0, 4.0],
            ]
        ),
        atol=1e-10,
        rtol=1e-7,
    )
    assert torch.all(dten_inplace.rep_layout[0].eq(test_sp_ten.rep_layout[0]))

def test_spherical_tensor_scalar_product_2d():
    metadata2d = torch.LongTensor([[2, 1], [1, 1]])
    test_2d = SphericalTensor(
        torch.tensor(
            [
                [0.1, 0.3, 0.3, 0.3],
                [0.2, -0.7, 0.1, -0.2],
                [0.3, 0.2, -0.3, 0.4],
                [0.2, 0.3, 0.4, -0.5],
                [0.1, 0.2, 0.3, 0.4],
            ]
        ),
        (
            0,
            1,
        ),
        metadata2d,
    )
    scale_2d = torch.tensor(
        [
            [1.0, 2.0],
            [3.0, 4.0],
            [5.0, 6.0],
        ]
    )
    out_ten = test_2d.scalar_mul(scale_2d).ten
    ref_ten = torch.tensor(
        [
            [0.1, 0.6, 0.6, 0.6],
            [0.6, -2.8, 0.4, -0.8],
            [1.5, 1.2, -1.8, 2.4],
            [1.0, 1.8, 2.4, -3.0],
            [0.5, 1.2, 1.8, 2.4],
        ]
    )
    assert torch.allclose(out_ten, ref_ten, atol=1e-7, rtol=1e-5)

def test_spherical_tensor_dot_product():
    dten1 = torch.rand(4, 6, 1, 101, 7)
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    test1 = SphericalTensor(dten1, (3,), metadata)
    dten2 = torch.rand(4, 6, 12, 101, 7)
    metadata = torch.LongTensor([[1, 2, 1, 0, 0], [7, 9, 1, 5, 3]])
    test2 = SphericalTensor(dten2, (2, 3), metadata)
    test_dot = test2.dot(test1, dim=3)
    assert torch.all(test_dot.ten.eq(test1.ten.mul(test2.ten).sum(3)))
    assert test_dot.shape == (4, 6, 12, 7)
    assert test_dot.rep_dims == (2,)
    assert torch.all(test_dot.metadata.eq(torch.LongTensor([[1, 2, 1, 0, 0]])))
    assert torch.all(test_dot.rep_layout[0].eq(test2.rep_layout[0]))

def test_spherical_tensor_rep_dot():
    # Minimal 1d example
    metadata = torch.LongTensor([[1, 2]])
    test_1d_1 = SphericalTensor(
        torch.tensor([0.2, 0.1, 0.2, 0.3, 0.2, 0.3, 0.4]), (0,), metadata
    )
    test_1d_2 = SphericalTensor(
        torch.tensor([0.4, 0.2, 0.3, -0.4, -0.4, -0.3, 0.4]), (0,), metadata
    )
    test_1d_out = test_1d_1.rep_dot(test_1d_2, dim=0)
    assert torch.allclose(
        test_1d_out,
        torch.tensor([0.08, -0.19, 0.14]),
        atol=1e-9,
        rtol=1e-7,
    )
    # 2d shape test
    dten1 = torch.rand(4, 6, 1, 101, 7)
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    test1 = SphericalTensor(dten1, (3,), metadata)
    dten2 = torch.rand(4, 6, 12, 101, 7)
    metadata = torch.LongTensor([[1, 2, 1, 0, 0], [7, 9, 1, 5, 3]])
    test2 = SphericalTensor(dten2, (2, 3), metadata)
    test_dot = test2.rep_dot(test1, dim=3)
    assert test_dot.shape == (4, 6, 12, 25, 7)
    assert test_dot.rep_dims == (2,)
    assert torch.all(test_dot.metadata.eq(torch.LongTensor([[1, 2, 1, 0, 0]])))
    assert torch.all(test_dot.rep_layout[0].eq(test2.rep_layout[0]))
    # When L=0 entries are positive, self rep-dot should return a
    # tensor almost the same as the invariant content within threshold _norm_eps
    test3 = SphericalTensor(
        torch.rand(32, 11, 5, 101, 7), (3,), torch.LongTensor([[7, 9, 1, 5, 3]])
    )
    dot_3 = test3.rep_dot(test3, dim=3)
    assert torch.allclose(
        dot_3,
        test3.invariant().pow(2),
        atol=1e-4,
        rtol=1e-3,
    )

def test_spherical_tensor_rep_outer():
    dten1 = torch.rand(101)
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    test1 = SphericalTensor(dten1, (0,), metadata)
    dten2 = torch.rand(101)
    test2 = SphericalTensor(dten2, (0,), metadata)
    test_outer = test1.rep_outer(test2)
    assert test_outer.shape == (101, 101)
    assert torch.all(test_outer.ten.eq(torch.outer(dten1, dten2)))
    assert torch.all(
        test_outer.metadata.eq(torch.tensor([[7, 9, 1, 5, 3], [7, 9, 1, 5, 3]]))
    )
    test3 = SphericalTensor(torch.rand(4, 6, 12, 101, 7), (3,), metadata)
    test4 = SphericalTensor(
        torch.rand(4, 6, 12, 12, 7), (3,), torch.LongTensor([[1, 2, 1, 0, 0]])
    )
    test_outer = test3.rep_outer(test4)
    assert test_outer.shape == (4, 6, 12, 101, 12, 7)
    assert test_outer.rep_dims == (3, 4)
    assert torch.all(
        test_outer.metadata.eq(torch.tensor([[7, 9, 1, 5, 3], [1, 2, 1, 0, 0]]))
    )

def test_spherical_tensor_invariant_contraction():
    metadata1d = torch.LongTensor([[2, 2]])
    test_1d = SphericalTensor(
        torch.tensor([0.2, -0.7, 0.1, -0.2, 0.3, 0.2, -0.3, 0.4]), (0,), metadata1d
    )
    assert torch.allclose(
        test_1d.invariant(),
        torch.tensor([0.2, -0.7, 0.4358, 0.4898]),
        atol=1e-4,
        rtol=0,
    )
    metadata2d = torch.LongTensor([[1, 1], [1, 1]])
    test_2d = SphericalTensor(
        torch.tensor(
            [
                [0.2, -0.7, 0.1, -0.2],
                [0.3, 0.2, -0.3, 0.4],
                [0.2, 0.3, 0.4, -0.5],
                [0.1, 0.2, 0.3, 0.4],
            ]
        ),
        (
            0,
            1,
        ),
        metadata2d,
    )
    assert torch.allclose(
        test_2d.invariant(),
        torch.tensor([[0.2, 0.73484], [0.37416, 1.03923]]),
        atol=1e-4,
        rtol=0,
    )

def test_spherical_tensor_fold_dim():
    metadata = torch.LongTensor([[8, 4, 2, 4, 0]])
    test = SphericalTensor(torch.rand(4, 6, 12, 58, 7), (3,), metadata)
    folded = test.fold(stride=2)
    assert folded.shape == (4, 6, 12, 29, 2, 7)
    assert torch.all(folded.metadata[0].eq(torch.LongTensor([4, 2, 1, 2, 0])))
    assert folded.num_channels == (9,)
    # Test error capturing
    metadata = torch.LongTensor([[8, 4, 2, 3, 1]])
    test = SphericalTensor(torch.rand(4, 6, 12, 60, 7), (3,), metadata)
    try:
        test.fold(stride=2)
    except AssertionError as e:
        assert (
            str(e)
            == f"The number of channels for the SphericalTensor to be folded must be multiples of "
            "stride, got (tensor([[8, 4, 2, 3, 1]]), 2) instead"
        )

def test_to_o3():
    dten1 = torch.rand(4, 6, 1, 101, 7)
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    test1 = SphericalTensor(dten1, (3,), metadata)
    o3_ten1 = O3Tensor.from_so3(test1, parity=1)
    assert o3_ten1.rep_dims == (3,)
    assert torch.all(
        o3_ten1.metadata.eq(torch.LongTensor([[7, 0, 9, 0, 1, 0, 5, 0, 3, 0]]))
    )
    assert torch.all(o3_ten1.rep_layout[0][3].eq(torch.ones(101, dtype=torch.long)))
    assert torch.all(o3_ten1.rep_layout[0][:3].eq(test1.rep_layout[0]))
    o3_ten2 = O3Tensor.from_so3(test1, parity=-1)
    assert o3_ten2.rep_dims == (3,)
    assert torch.all(
        o3_ten2.metadata.eq(torch.LongTensor([[0, 7, 0, 9, 0, 1, 0, 5, 0, 3]]))
    )
    assert torch.all(o3_ten2.rep_layout[0][3].eq(-torch.ones(101, dtype=torch.long)))
...
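In test_spherical_tensor_rep_outer above, test_outer is simply the SphericalTensor returned by rep_outer, and the assertions compare it against torch.outer. The sketch below is an assumption rather than part of the original file: it shows one way to drive that same check with hypothesis by drawing random seeds. The test name and the settings/strategy choices are illustrative; the SphericalTensor construction and the assertions are copied from the test above.

# Hedged sketch: randomizing the rep_outer check above with hypothesis instead of a
# single torch.rand draw. Only the SphericalTensor calls and assertions come from
# test_spherical_tensor.py; everything else is an illustrative assumption.
import torch
from hypothesis import given, settings, strategies as st
from torch_gauge.o3 import SphericalTensor

@settings(max_examples=25, deadline=None)
@given(st.integers(min_value=0, max_value=2**31 - 1))
def test_outer_matches_torch_outer(seed):
    torch.manual_seed(seed)  # make each generated example reproducible
    metadata = torch.LongTensor([[7, 9, 1, 5, 3]])
    dten1, dten2 = torch.rand(101), torch.rand(101)
    test_outer = SphericalTensor(dten1, (0,), metadata).rep_outer(
        SphericalTensor(dten2, (0,), metadata)
    )
    # rep_outer of two 1d representations should agree with the plain outer product
    assert test_outer.shape == (101, 101)
    assert torch.all(test_outer.ten.eq(torch.outer(dten1, dten2)))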


helpers.py

Source: helpers.py (GitHub)


# import libraries
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV, GroupKFold, StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
#from keras.models import Sequential
#from keras.layers import Dense
#from keras.layers import Flatten
#from keras.layers import Dropout
#from keras.layers.convolutional import Conv1D
#from keras.layers.convolutional import MaxPooling1D

def plot_calibration_curve(X, y, model, model_name, outer_cv):
    """
    Plot calibration curve for model w/o and with calibration.
    """
    # Calibrated with isotonic/sigmoid calibration
    isotonic = CalibratedClassifierCV(model, cv=outer_cv, method='isotonic')
    sigmoid = CalibratedClassifierCV(model, cv=outer_cv, method='sigmoid')
    lr = LogisticRegression(C=1.)  # baseline
    fig = plt.figure(figsize=(10, 10))
    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax2 = plt.subplot2grid((3, 1), (2, 0))
    ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    for clf, name in [(lr, 'Logistic'), (model, model_name), (isotonic, model_name + ' + Isotonic'),
                      (sigmoid, model_name + ' + Sigmoid')]:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if hasattr(clf, "predict_proba"):
            prob_pos = clf.predict_proba(X_test)[:, 1]
        else:  # use decision function
            prob_pos = clf.decision_function(X_test)
            prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
        fraction_of_positives, mean_predicted_value = calibration_curve(y_test, prob_pos, n_bins=10)
        ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
                 label=name)
        ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
                 histtype="step", lw=2)
    ax1.set_ylabel("Fraction of positives")
    ax1.set_ylim([-0.05, 1.05])
    ax1.legend(loc="lower right")
    ax1.set_title('Calibration plots (reliability curve)')
    ax2.set_xlabel("Mean predicted value")
    ax2.set_ylabel("Count")
    ax2.legend(loc="upper center", ncol=2)

    fig.savefig('calibration' + model_name + '.png', dpi=300)
    #plt.tight_layout()

def process_testdata(file):
    """
    Process the testdata from an .xlsx file into a transformed np.array that is ready for predictions.
    """
    # import libraries
    import re
    import pandas as pd
    from scipy import signal, integrate
    # get the data
    raw_data = pd.ExcelFile(file)
    sheetname = re.search('data/.+xlsx', file).group(0)[5:-5]
    test_data = np.matrix(raw_data.parse(sheetname, header=3)).T[1:, 0:58]  # first column (thus row in DF) is timestamp

    # get blanks and subtract
    blanks = np.concatenate((test_data[34, :], test_data[35, :], test_data[46, :], test_data[47, :], test_data[58, :],
                             test_data[59, :], test_data[70, :], test_data[71, :]), axis=0)
    blanks_median = np.median(blanks, axis=0)
    test_data = np.delete(test_data, obj=[34, 35, 46, 47, 58, 59, 70, 71], axis=0)
    processed_data = np.asarray(test_data)  # - blanks_median
    # transform the data
    transformed_sav1 = signal.savgol_filter(processed_data, window_length=11, polyorder=5, deriv=1)
    transformed_sav5 = signal.savgol_filter(processed_data, window_length=21, polyorder=2, deriv=2)
    transformed_int = np.zeros((processed_data.shape[0], processed_data.shape[1]))
    for i in range(transformed_int.shape[0]):
        transformed_int[i, :] = integrate.cumtrapz(processed_data[i, :], initial=0)

    return processed_data, transformed_sav1, transformed_sav5, transformed_int, blanks_median

def predict_testdata(trained_model, testdata, name):
    """
    Predict testcases on processed/transformed testdata for a given model.
    """
    import pandas as pd
    preds = trained_model.predict(testdata)
    probs = trained_model.predict_proba(testdata)
    probs = [probs[i][1] for i in range(probs.shape[0])]
    predictions_df = pd.DataFrame({name + '_probability': probs, name + '_prediction': preds})
    return predictions_df

def NestedGroupKFoldProba(model, X, y, parameter_grid, groups, n_classes, scorer,
                          inner_cv=GroupKFold(n_splits=4), outer_cv=GroupKFold(n_splits=4), weights=[], cpus=1):
    """
    Implements a nested version of GroupKFold cross-validation using GridSearchCV to evaluate models
    that need hyperparameter tuning in settings where different groups exist in the available data.

    Dependencies: sklearn.model_selection, numpy

    Input:
    - X, y: features and labels (must be NumPy arrays).
    - model, parameter_grid: the model instance and its parameter grid to be optimized.
    - groups: the groups to use in both inner- and outer loop.
    - n_classes: the number of classes in the prediction problem
    - scorer: the scoring to use in inner loop.
    - inner_cv, outer_cv: the iterators for both CV-loops (default: GroupKFold(n_splits=4))
    - weights: sample weights to account for more important samples.

    Output: cross-validated predicted class probabilities
    """
    # define empty matrix to store performances (n CV runs and four performance metrics)
    probabilities = np.zeros((X.shape[0], n_classes))
    preds = np.zeros(X.shape[0])

    # define outer loop
    for train_outer, test_outer in outer_cv.split(X, y, groups):
        X_train, X_test = X[train_outer], X[test_outer]
        y_train, y_test = y[train_outer], y[test_outer]
        groups_train, groups_test = groups[train_outer], groups[test_outer]

        # define inner loop (in GridSearchCV)
        tuned_model = GridSearchCV(model, cv=inner_cv, param_grid=parameter_grid, scoring=scorer, n_jobs=cpus)
        if len(weights) == 0:
            tuned_model.fit(X_train, y_train, groups=groups_train)
        else:
            weights_train = weights[train_outer]
            tuned_model.fit(X_train, y_train, groups=groups_train, **{'sample_weight': weights_train})

        # make predictions for test set (outer loop)
        y_probs = tuned_model.predict_proba(X_test)
        y_preds = tuned_model.predict(X_test)

        for i, index in enumerate(test_outer):
            probabilities[index, :] = y_probs[i, :]
            preds[index] = y_preds[i]

    return probabilities, preds

def NestedShuffledKFoldProba(model, X, y, parameter_grid, n_classes, scorer,
                             inner_cv=StratifiedKFold(n_splits=10), outer_cv=StratifiedKFold(n_splits=10),
                             weights=[], cpus=1):
    """
    Implements a nested version of stratified k-fold cross-validation using GridSearchCV to evaluate
    models that need hyperparameter tuning; the data are shuffled before splitting.

    Dependencies: sklearn.model_selection, numpy

    Input:
    - X, y: features and labels (must be NumPy arrays).
    - model, parameter_grid: the model instance and its parameter grid to be optimized.
    - n_classes: the number of classes in the prediction problem
    - scorer: the scoring to use in inner loop.
    - inner_cv, outer_cv: the StratifiedKFold iterators for both CV-loops (default n_splits: 10)
    - weights: sample weights to account for more important samples

    Output: cross-validated predicted class probabilities
    """
    # define empty matrix to store performances (n CV runs and four performance metrics)
    probabilities = np.zeros((X.shape[0], n_classes))
    preds = np.zeros(X.shape[0])

    # shuffle data
    indices = np.asarray(range(X.shape[0]), dtype=int)
    if len(weights) == 0:
        X, y, indices = shuffle(X, y, indices, random_state=0)
    else:
        X, y, weights, indices = shuffle(X, y, weights, indices, random_state=0)

    # define outer loop
    for train_outer, test_outer in outer_cv.split(X, y):
        X_train, X_test = X[train_outer], X[test_outer]
        y_train, y_test = y[train_outer], y[test_outer]
        indices_train, indices_test = indices[train_outer], indices[test_outer]

        # define inner loop (in GridSearchCV)
        tuned_model = GridSearchCV(model, cv=inner_cv, param_grid=parameter_grid, scoring=scorer, n_jobs=cpus)
        if len(weights) == 0:
            tuned_model.fit(X_train, y_train)
        else:
            weights_train = weights[train_outer]
            tuned_model.fit(X_train, y_train, **{'sample_weight': weights_train})

        # make predictions for test set (outer loop)
        y_probs = tuned_model.predict_proba(X_test)
        y_preds = tuned_model.predict(X_test)

        for i, index in enumerate(test_outer):
            original_index = indices_test[i]
            probabilities[original_index, :] = y_probs[i, :]
            preds[original_index] = y_preds[i]

    return probabilities, preds
...


model_selection-checkpoint.py

Source: model_selection-checkpoint.py (GitHub)


from sklearn.model_selection import KFold, GridSearchCV
import numpy as np
from sklearn.metrics import roc_auc_score
from utils.fairness_functions import compute_fairness
from sklearn.calibration import CalibratedClassifierCV

def cross_validate(X, Y, estimator, c_grid, seed):
    """Performs cross validation and selects a model given X and Y dataframes,
    an estimator, a dictionary of parameters, and a random seed.
    """
    # settings
    n_splits = 5
    scoring = 'roc_auc'
    cross_validation = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    clf = GridSearchCV(estimator=estimator, param_grid=c_grid, scoring=scoring,
                       cv=cross_validation, return_train_score=True).fit(X, Y)
    mean_train_score = clf.cv_results_['mean_train_score']
    mean_test_score = clf.cv_results_['mean_test_score']
    test_std = clf.cv_results_['std_test_score']
    # scores
    best_auc = clf.best_score_
    best_std = test_std[np.where(mean_test_score == clf.best_score_)[0][0]]
    best_param = clf.best_params_
    auc_diff = mean_train_score[np.where(mean_test_score == clf.best_score_)[
        0][0]] - clf.best_score_
    return mean_train_score, mean_test_score, test_std, best_auc, best_std, best_param, auc_diff

def nested_cross_validate(X, Y, estimator, c_grid, seed, index=None):

    ## outer cv
    train_outer = []
    test_outer = []
    outer_cv = KFold(n_splits=5, random_state=seed, shuffle=True)

    ## 5 sets of train & test index
    for train, test in outer_cv.split(X, Y):
        train_outer.append(train)
        test_outer.append(test)

    ## storing lists
    holdout_auc = []
    best_params = []
    auc_diffs = []
    fairness_overviews = []
    ## inner cv
    inner_cv = KFold(n_splits=5, shuffle=True, random_state=seed)
    for i in range(len(train_outer)):

        ## subset train & test sets in inner loop
        train_x, test_x = X.iloc[train_outer[i]], X.iloc[test_outer[i]]
        train_y, test_y = Y[train_outer[i]], Y[test_outer[i]]

        ## holdout test with "race" for fairness
        holdout_with_attrs = test_x.copy()

        ## remove unused feature in modeling
        train_x = train_x.drop(['person_id', 'screening_date', 'race'], axis=1)
        test_x = test_x.drop(['person_id', 'screening_date', 'race'], axis=1)

        ## GridSearch: inner CV
        clf = GridSearchCV(estimator=estimator, param_grid=c_grid, scoring='roc_auc',
                           cv=inner_cv, return_train_score=True).fit(train_x, train_y)
        ## best parameter & scores
        mean_train_score = clf.cv_results_['mean_train_score']
        mean_test_score = clf.cv_results_['mean_test_score']
        best_param = clf.best_params_
        auc_diffs.append(mean_train_score[np.where(mean_test_score == clf.best_score_)[0][0]] - clf.best_score_)
        ## train model on best param
        if index == 'svm':
            best_model = CalibratedClassifierCV(clf, cv=5)
            best_model.fit(train_x, train_y)
            prob = best_model.predict_proba(test_x)[:, 1]
            holdout_pred = best_model.predict(test_x)
        else:
            best_model = clf.fit(train_x, train_y)
            prob = best_model.predict_proba(test_x)[:, 1]
            holdout_pred = best_model.predict(test_x)
        ## fairness
        holdout_fairness_overview = compute_fairness(df=holdout_with_attrs,
                                                     preds=holdout_pred,
                                                     labels=test_y)
        fairness_overviews.append(holdout_fairness_overview)
        ## store results
        holdout_auc.append(roc_auc_score(test_y, prob))
        best_params.append(best_param)
...
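In nested_cross_validate, train_outer and test_outer are lists that collect the index arrays of the five outer folds before the inner GridSearchCV loop runs. The self-contained sketch below isolates just that bookkeeping pattern so the fold-coverage property is easy to verify; the synthetic data shapes and the fold count are assumptions, while the loop structure mirrors the file above.

# Self-contained sketch of the outer-fold bookkeeping used in nested_cross_validate.
import numpy as np
from sklearn.model_selection import KFold

X = np.random.rand(100, 4)               # synthetic features
Y = np.random.randint(0, 2, size=100)    # synthetic binary labels

train_outer, test_outer = [], []
outer_cv = KFold(n_splits=5, random_state=42, shuffle=True)
for train, test in outer_cv.split(X, Y):
    train_outer.append(train)
    test_outer.append(test)

# Five disjoint held-out folds that together cover every sample exactly once.
assert sum(len(t) for t in test_outer) == len(X)
assert len(np.unique(np.concatenate(test_outer))) == len(X)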


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

