How to use the helper1 method in green

Best Python code snippets using green

parameters_finding.py

Source: parameters_finding.py (GitHub)


import numpy as np
import pandas as pd
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from helpers import get_wine_data
from helpers import get_abalone_data


class EstimatorSelectionHelper:

    def __init__(self, models, params):
        if not set(models.keys()).issubset(set(params.keys())):
            missing_params = list(set(models.keys()) - set(params.keys()))
            raise ValueError("Some estimators are missing parameters: %s" % missing_params)
        self.models = models
        self.params = params
        self.keys = models.keys()
        self.grid_searches = {}

    def fit(self, X, y, cv=3, n_jobs=3, verbose=1, scoring=None, refit=False):
        for key in self.keys:
            print("Running GridSearchCV for %s." % key)
            model = self.models[key]
            params = self.params[key]
            gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
                              verbose=verbose, scoring=scoring, refit=refit,
                              return_train_score=True)
            gs.fit(X, y)
            self.grid_searches[key] = gs

    def score_summary(self, sort_by='mean_score'):
        def row(key, scores, params):
            d = {
                'estimator': key,
                'min_score': min(scores),
                'max_score': max(scores),
                'mean_score': np.mean(scores),
                'std_score': np.std(scores),
            }
            return pd.Series({**params, **d})

        rows = []
        for k in self.grid_searches:
            print(k)
            params = self.grid_searches[k].cv_results_['params']
            scores = []
            for i in range(self.grid_searches[k].cv):
                key = "split{}_test_score".format(i)
                r = self.grid_searches[k].cv_results_[key]
                scores.append(r.reshape(len(params), 1))
            all_scores = np.hstack(scores)
            for p, s in zip(params, all_scores):
                rows.append(row(k, s, p))

        df = pd.concat(rows, axis=1).T.sort_values([sort_by], ascending=False)
        columns = ['estimator', 'mean_score', 'max_score', 'std_score']
        columns = columns + [c for c in df.columns if c not in columns]
        return df[columns]


models1 = {
    'AdaBoostClassifier': AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
    'DecisionTreeClassifier': DecisionTreeClassifier(),
    'KNeighborsClassifier': KNeighborsClassifier()
}

params1 = {
    'AdaBoostClassifier': {
        'n_estimators': [1, 3, 5, 7, 9, 11, 13, 15],
        "base_estimator__criterion": ["gini"],
        "base_estimator__splitter": ["best", "random"],
        'base_estimator__max_depth': [None, 1, 2, 3],
        'base_estimator__min_samples_leaf': [1, 2, 3, 4, 5]
    },
    'DecisionTreeClassifier': {'max_depth': [None, 1, 2, 3], 'min_samples_leaf': [1, 2, 3, 4, 5]},
    'KNeighborsClassifier': {'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9]}
}

models2 = {
    'SVC': SVC()
}

params2 = {
    'SVC': [
        {'kernel': ['linear'], 'C': [1, 10]},
        {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [0.001, 0.0001]},
    ]
}

models3 = {
    'MLPClassifier': MLPClassifier()
}

params3 = {
    'MLPClassifier': {
        'solver': ['lbfgs'],
        'max_iter': [1, 3, 5, 7, 9],
        'alpha': 10.0 ** -np.arange(1, 10),
        'hidden_layer_sizes': np.arange(10, 15),
        'random_state': [0, 1]
    }
}

if __name__ == "__main__":
    X, y = get_wine_data()
    X1, y1 = get_abalone_data()

    helper1 = EstimatorSelectionHelper(models1, params1)
    helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    results.to_csv("out/wine_params_4.csv")

    helper1 = EstimatorSelectionHelper(models1, params1)
    helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    results.to_csv("out/abalone_params_4.csv")

    # helper1 = EstimatorSelectionHelper(models2, params2)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_2.csv")
    # helper1 = EstimatorSelectionHelper(models2, params2)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/abalone_params_2.csv")
    # helper1 = EstimatorSelectionHelper(models3, params3)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_3.csv")
    # helper1 = EstimatorSelectionHelper(models3, params3)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    ...
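helper1 in the snippet above is simply an instance of the EstimatorSelectionHelper class: you construct it with a dictionary of estimators and a matching dictionary of parameter grids, call fit() to run one GridSearchCV per estimator, and call score_summary() to get one ranked row per parameter combination. A minimal, self-contained sketch of that flow, assuming the class definition above; the repository's helpers module (get_wine_data) is not shown in the snippet, so scikit-learn's bundled wine dataset and an illustrative output path stand in for it here:

from sklearn.datasets import load_wine
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

# Stand-in data: load_wine replaces helpers.get_wine_data(), which is not available here.
X, y = load_wine(return_X_y=True)

models = {
    'DecisionTreeClassifier': DecisionTreeClassifier(),
    'KNeighborsClassifier': KNeighborsClassifier()
}
params = {
    'DecisionTreeClassifier': {'max_depth': [None, 2, 4], 'min_samples_leaf': [1, 3]},
    'KNeighborsClassifier': {'n_neighbors': [3, 5, 7]}
}

# One GridSearchCV per estimator, all with the same CV and scoring settings.
helper1 = EstimatorSelectionHelper(models, params)
helper1.fit(X, y, scoring='accuracy', n_jobs=1, cv=3)

# One row per parameter combination, sorted by the chosen column.
results = helper1.score_summary(sort_by='mean_score')
print(results.head())
results.to_csv("wine_params_demo.csv")  # hypothetical output path

Note that fit() passes refit=False by default, so the stored GridSearchCV objects are meant for comparing scores rather than for prediction afterwards.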


parameters_finding_non_svc.py

Source: parameters_finding_non_svc.py (GitHub)


# The imports and the EstimatorSelectionHelper class are identical to
# parameters_finding.py above; only the model/parameter dictionaries and the
# __main__ section differ.

models1 = {
    'AdaBoostClassifier': AdaBoostClassifier(),
    'GradientBoostingClassifier': GradientBoostingClassifier(),
    'DecisionTreeClassifier': DecisionTreeClassifier(),
    'KNeighborsClassifier': KNeighborsClassifier()
}

params1 = {
    'AdaBoostClassifier': {'n_estimators': np.arange(1, 20)},
    'GradientBoostingClassifier': {'n_estimators': np.arange(1, 20), 'learning_rate': 0.1 ** np.arange(1, 10)},
    'DecisionTreeClassifier': {'max_depth': np.arange(1, 20), 'min_samples_leaf': np.arange(1, 20)},
    'KNeighborsClassifier': {'n_neighbors': np.arange(1, 10)}
}

# models2/params2 (SVC) and models3/params3 (MLPClassifier) are the same as in
# parameters_finding.py above.

if __name__ == "__main__":
    X, y = get_wine_data()
    X1, y1 = get_abalone_data()

    # helper1 = EstimatorSelectionHelper(models1, params1)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_1.csv")
    # helper1 = EstimatorSelectionHelper(models1, params1)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/abalone_params_1.csv")
    # helper1 = EstimatorSelectionHelper(models2, params2)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_2.csv")
    # helper1 = EstimatorSelectionHelper(models2, params2)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/abalone_params_2.csv")

    helper1 = EstimatorSelectionHelper(models3, params3)
    helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    results.to_csv("out/wine_params_3.csv")

    helper1 = EstimatorSelectionHelper(models3, params3)
    helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    ...


parameters_finding_ann.py

Source: parameters_finding_ann.py (GitHub)


# Identical to parameters_finding_non_svc.py above except for the __main__
# section, which runs the SVC grid (models2/params2) instead of the
# MLPClassifier grid.

if __name__ == "__main__":
    X, y = get_wine_data()
    X1, y1 = get_abalone_data()

    # helper1 = EstimatorSelectionHelper(models1, params1)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_1.csv")
    # helper1 = EstimatorSelectionHelper(models1, params1)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/abalone_params_1.csv")

    helper1 = EstimatorSelectionHelper(models2, params2)
    helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    results.to_csv("out/wine_params_2.csv")

    helper1 = EstimatorSelectionHelper(models2, params2)
    helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    results = (helper1.score_summary(sort_by='max_score'))
    results.to_csv("out/abalone_params_2.csv")

    # helper1 = EstimatorSelectionHelper(models3, params3)
    # helper1.fit(X, y, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    # results.to_csv("out/wine_params_3.csv")
    # helper1 = EstimatorSelectionHelper(models3, params3)
    # helper1.fit(X1, y1, scoring='accuracy', n_jobs=4, cv=5)
    # results = (helper1.score_summary(sort_by='max_score'))
    ...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run green automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free.

