How to use the test_submit_id method in lettuce_webdriver

Best Python code snippets using lettuce_webdriver (Python)

Note that despite the title, test_submit_id is not a lettuce_webdriver API call: in the snippets below it is an ordinary variable that holds the ID column of a Kaggle-style test set, so that predictions can later be matched back to their rows in a submission file.
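To orient the reader, here is a minimal sketch of the pattern every snippet below follows. The path '../input/test.csv' and the column names 'ID' and 'y' are taken from the snippets; the commented submission-writing lines are an assumption about how such IDs are typically used, not part of the source code.

# Minimal sketch of the test_submit_id pattern, assuming a Kaggle-style
# test.csv with an 'ID' column (as in the snippets below).
import pandas as pd

test_submit = pd.read_csv('../input/test.csv')
test_submit_id = test_submit['ID']   # hold the row IDs aside for the submission

# ... train a model and compute y_pred for the test rows ...
# submission = pd.DataFrame({'ID': test_submit_id, 'y': y_pred})
# submission.to_csv('submission.csv', index=False)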

genetic-algorithm-for-parameterisation.py

Source: genetic-algorithm-for-parameterisation.py (GitHub)


import random
import numpy as np
import xgboost as xgb
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import r2_score
from deap import base
from deap import creator
from deap import tools

#######################
# Load & process data
#######################
def get_data():
    #################
    # read datasets
    #################
    train = pd.read_csv('../input/train.csv')
    test_submit = pd.read_csv('../input/test.csv')
    # Get y and ID
    train = train[train.y < 250]  # Optional: Drop y outliers
    y_train = train['y']
    train = train.drop('y', axis=1)
    test_submit_id = test_submit['ID']
    #########################
    # Create data
    #########################
    features = ['X0',
                'X5',
                'X118',
                'X127',
                'X47',
                'X315',
                'X311',
                'X179',
                'X314',
                'X232',
                'X29',
                'X263',
                'X261']
    # Build a new dataset using key parameters, lots of drops
    train = train[features]
    test_submit = test_submit[features]
    # Label encoder
    for c in train.columns:
        if train[c].dtype == 'object':
            lbl = LabelEncoder()
            lbl.fit(list(train[c].values) + list(test_submit[c].values))
            train[c] = lbl.transform(list(train[c].values))
            test_submit[c] = lbl.transform(list(test_submit[c].values))
    # Convert to NumPy arrays (.to_numpy() replaces the removed .as_matrix())
    train = train.to_numpy()
    y_train = np.transpose([y_train.to_numpy()])
    test_submit = test_submit.to_numpy()
    test_submit_id = test_submit_id.to_numpy()
    return train, y_train, test_submit, test_submit_id

#########################
# XGBoost Model
#########################
def gradient_boost(train, y_train, params):
    y_mean_train = np.mean(y_train)
    # prepare dict of params for xgboost to run with
    xgb_params = {
        'n_trees': params[0],
        'eta': params[1],
        'max_depth': params[2],
        'subsample': params[3],
        'objective': 'reg:linear',
        'eval_metric': 'rmse',
        'base_score': y_mean_train,
        'seed': 123456789,
        'silent': 1
    }
    # form DMatrices for Xgboost training
    dtrain = xgb.DMatrix(train, y_train)
    # xgboost, cross-validation
    cv_result = xgb.cv(xgb_params,
                       dtrain,
                       nfold=10,
                       num_boost_round=5000,
                       early_stopping_rounds=100,
                       verbose_eval=False,
                       show_stdv=False)
    num_boost_rounds = len(cv_result)
    # train model
    model = xgb.train(dict(xgb_params), dtrain, num_boost_round=num_boost_rounds)
    # get model accuracy
    accuracy = r2_score(dtrain.get_label(), model.predict(dtrain))
    return model, accuracy

######################
# Genetic algorithm
######################
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Get data
train, y_train, test_submit, test_submit_id = get_data()
# Attribute generator
toolbox.register("n_trees", random.randint, 100, 10000)
toolbox.register("eta", random.uniform, 0.0001, 0.01)
toolbox.register("max_depth", random.randint, 1, 10)
toolbox.register("subsample", random.uniform, 0, 1)
# Structure initializers
toolbox.register("individual", tools.initCycle, creator.Individual,
                 (toolbox.n_trees, toolbox.eta, toolbox.max_depth, toolbox.subsample), n=1)
# define the population to be a list of individuals
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

# the goal ('fitness') function to be maximized
def evalOneMax(individual):
    model, accuracy = gradient_boost(train, y_train, individual)
    return [accuracy]

# the model for a specified individual
def getModel(individual):
    model, accuracy = gradient_boost(train, y_train, individual)
    return model

# register the goal / fitness function
toolbox.register("evaluate", evalOneMax)
# register the crossover operator
toolbox.register("mate", tools.cxTwoPoint)
# register a mutation operator
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
# operator for selecting individuals for breeding the next
# generation
toolbox.register("select", tools.selTournament, tournsize=3)

def main():
    random.seed(12345)
    # create an initial population
    pop = toolbox.population(n=20)
    # CXPB is the probability with which two individuals are crossed
    # MUTPB is the probability for mutating an individual
    CXPB, MUTPB = 0.5, 0.2
    print("Start of evolution")
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    print("  Evaluated %i individuals" % len(pop))
    # Extracting all the fitnesses of the population
    fits = [ind.fitness.values[0] for ind in pop]
    # Variable keeping track of the number of generations
    g = 0
    # Begin the evolution
    while max(fits) < 100 and g < 1000:
        # A new generation
        g = g + 1
        print("-- Generation %i --" % g)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # fitness values of the children must be recalculated later
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        print("  Evaluated %i individuals" % len(invalid_ind))
        # The population is entirely replaced by the offspring
        pop[:] = offspring
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean ** 2) ** 0.5
        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)
        best_ind = tools.selBest(pop, 1)[0]
        print("Best individual so far is %s, %s" % (best_ind, best_ind.fitness.values))
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))

if __name__ == "__main__":
    main()  # the source is truncated here; calling main() is the evident intent
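The snippet loads test_submit and test_submit_id but never uses them. A continuation along the following lines (hypothetical, not part of the source; it assumes main() is adjusted to return pop) would turn the best evolved individual into a model and write predictions keyed by ID:

# Hypothetical continuation (not in the source): build a model from the
# best individual and pair its test-set predictions with the held-back IDs.
pop = main()                                  # assumes main() is changed to return pop
best_ind = tools.selBest(pop, 1)[0]
model = getModel(best_ind)                    # re-trains XGBoost with the best params
y_pred = model.predict(xgb.DMatrix(test_submit))
pd.DataFrame({'ID': test_submit_id, 'y': y_pred}).to_csv('submission.csv', index=False)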


deep-neural-network-using-tensorflow.py

Source: deep-neural-network-using-tensorflow.py (GitHub)


# NOTE: written against the TensorFlow 1.x API (placeholders, sessions);
# under TensorFlow 2 these calls live in tf.compat.v1.
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold

###################
# Load & process data
###################
def get_data():
    #################
    # read datasets
    #################
    train = pd.read_csv('../input/train.csv')
    test_submit = pd.read_csv('../input/test.csv')
    # Get y and ID
    train = train[train.y < 250]  # Optional: Drop y outliers
    y_train = train['y']
    train = train.drop('y', axis=1)
    test_submit_id = test_submit['ID']
    #########################
    # Create data
    #########################
    features = ['X0',
                'X5',
                'X118',
                'X127',
                'X47',
                'X315',
                'X311',
                'X179',
                'X314',
                'X232',
                'X29',
                'X263',
                'X261']
    # Build a new dataset using key parameters, lots of drops
    train = train[features]
    test_submit = test_submit[features]
    # Label encoder
    for c in train.columns:
        if train[c].dtype == 'object':
            lbl = LabelEncoder()
            lbl.fit(list(train[c].values) + list(test_submit[c].values))
            train[c] = lbl.transform(list(train[c].values))
            test_submit[c] = lbl.transform(list(test_submit[c].values))
    # Convert to NumPy arrays (.to_numpy() replaces the removed .as_matrix())
    train = train.to_numpy()
    y_train = np.transpose([y_train.to_numpy()])
    test_submit = test_submit.to_numpy()
    test_submit_id = test_submit_id.to_numpy()
    #print(train.shape)
    #print(test_submit.shape)
    return train, y_train, test_submit, test_submit_id

#####################
# Neural Network
#####################
# Training steps
STEPS = 500
LEARNING_RATE = 0.0001
BETA = 0.01
DROPOUT = 0.5
RANDOM_SEED = 12345
MAX_Y = 250
RESTORE = True
START = 0
# Training variables
IN_DIM = 13
# Network Parameters - Hidden layers
n_hidden_1 = 100
n_hidden_2 = 50

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.03, shape=shape)
    return tf.Variable(initial)

def deep_network(inputs, keep_prob):
    # Input -> Hidden Layer
    w1 = weight_variable([IN_DIM, n_hidden_1])
    b1 = bias_variable([n_hidden_1])
    # Hidden Layer -> Hidden Layer
    w2 = weight_variable([n_hidden_1, n_hidden_2])
    b2 = bias_variable([n_hidden_2])
    # Hidden Layer -> Output
    w3 = weight_variable([n_hidden_2, 1])
    b3 = bias_variable([1])
    # 1st Hidden layer with dropout
    h1 = tf.nn.relu(tf.matmul(inputs, w1) + b1)
    h1_dropout = tf.nn.dropout(h1, keep_prob)
    # 2nd Hidden layer with dropout
    h2 = tf.nn.relu(tf.matmul(h1_dropout, w2) + b2)
    h2_dropout = tf.nn.dropout(h2, keep_prob)
    # Run sigmoid on output to get 0 to 1
    out = tf.nn.sigmoid(tf.matmul(h2_dropout, w3) + b3)
    # Loss function with L2 Regularization
    regularizers = tf.nn.l2_loss(w1) + tf.nn.l2_loss(w2) + tf.nn.l2_loss(w3)
    scaled_out = tf.multiply(out, MAX_Y)  # Scale output
    return inputs, out, scaled_out, regularizers

def main(_):
    tf.set_random_seed(RANDOM_SEED)
    # Create the model
    x = tf.placeholder(tf.float32, [None, IN_DIM])
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 1])
    # Dropout on hidden layers
    keep_prob = tf.placeholder("float")
    # Build the graph for the deep net
    inputs, out, scaled_out, regularizers = deep_network(x, keep_prob)
    # Normal loss function (RMSE)
    loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_, scaled_out))))
    # Loss function with L2 Regularization
    loss = tf.reduce_mean(loss + BETA * regularizers)
    # Optimizer
    train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    total_error = tf.reduce_sum(tf.square(tf.subtract(y_, tf.reduce_mean(y_))))
    unexplained_error = tf.reduce_sum(tf.square(tf.subtract(y_, scaled_out)))
    accuracy = tf.subtract(1.0, tf.divide(unexplained_error, total_error))
    # Save model
    saver = tf.train.Saver(max_to_keep=5)
    with tf.Session() as sess:
        #if RESTORE:
        #    print('Loading Model...')
        #    ckpt = tf.train.get_checkpoint_state('./models/neural/')
        #    saver.restore(sess, ckpt.model_checkpoint_path)
        #else:
        sess.run(tf.global_variables_initializer())
        train, y_train, test_submit, test_submit_id = get_data()
        # Train until maximum steps reached or interrupted
        for i in range(START, STEPS):
            k_fold = KFold(n_splits=10, shuffle=True)
            #if i % 100 == 0:
            #    saver.save(sess, './models/neural/step_' + str(i) + '.cptk')
            for k, (ktrain, ktest) in enumerate(k_fold.split(train, y_train)):
                train_step.run(feed_dict={x: train[ktrain], y_: y_train[ktrain], keep_prob: DROPOUT})
                # Show test score every 10 iterations
                if i % 10 == 0:
                    # Tensorflow R2
                    #train_accuracy = accuracy.eval(feed_dict={
                    #    x: train[ktest], y_: y_train[ktest]})
                    # SkLearn metrics R2
                    train_accuracy = r2_score(y_train[ktest],
                                              sess.run(scaled_out, feed_dict={x: train[ktest], keep_prob: 1.0}))
                    print('Step: %d, Fold: %d, R2 Score: %g' % (i, k, train_accuracy))
        ####################
        # CV (repeat 5 times)
        ####################
        CV = []
        for i in range(5):
            k_fold = KFold(n_splits=10, shuffle=True)
            for k, (ktrain, ktest) in enumerate(k_fold.split(train, y_train)):
                # Tensorflow R2
                #accuracy = accuracy.eval(feed_dict={
                #    x: train[ktest], y_: y_train[ktest]})
                # SkLearn metrics R2
                accuracy = r2_score(y_train[ktest],
                                    sess.run(scaled_out, feed_dict={x: train[ktest], keep_prob: 1.0}))
                print('Step: %d, Fold: %d, R2 Score: %g' % (i, k, accuracy))
                CV.append(accuracy)
        print('Mean R2: %g' % (np.mean(CV)))

if __name__ == '__main__':
    tf.app.run()  # the source is truncated here; tf.app.run() is the usual TF1 entry point for main(_)
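Here too, test_submit and test_submit_id are returned by get_data() but never consumed in the listing. A sketch like the following (hypothetical, not part of the source; it belongs inside the same tf.Session block after training) would score the test set and pair the predictions with their IDs:

# Hypothetical continuation (not in the source), to run inside the
# tf.Session block once training has finished:
y_pred = sess.run(scaled_out, feed_dict={x: test_submit, keep_prob: 1.0})
submission = pd.DataFrame({'ID': test_submit_id, 'y': y_pred.ravel()})
submission.to_csv('submission.csv', index=False)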


deepNN-Liam.py

Source: deepNN-Liam.py (GitHub)

This listing is identical, line for line, to deep-neural-network-using-tensorflow.py above, so the code is not repeated here.

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lettuce_webdriver automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!

