How to use the test_std_deviation method in pyresttest

Best Python code snippet using pyresttest_python

MGP.py

Source: MGP.py (GitHub)


import torch
import gpytorch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import h5py
import os
import pdb
from MGP_subclasses import *
from data_processing import *


class Block_MGP():
    def __init__(self, kernel, learning_rate, n_training_iter, block_indices):
        self.kernel = kernel
        self.learning_rate = learning_rate
        self.n_training_iter = n_training_iter
        self.block_indices = block_indices
        self.number_of_block = len(block_indices)
        self.total_nb_tasks = len([item for sublist in self.block_indices for item in sublist])
        # One model / likelihood / loss history per block
        self.model = []
        self.likelihood = []
        self.loss_list = []

    def build_and_train_single_model(self, x_train, y_train, block_number=0, smart_end=False):
        '''
        :param x_train: array size nb_timesteps * 1, represents time
        :param y_train: array size nb_timesteps * nb_tasks
        :param block_number: the number of the block, starts from 0
        :return: modifies the attributes model and likelihood according to the training data
        '''
        nb_tasks = y_train.shape[-1]
        if nb_tasks == 1:  # a single task in the block: plain GP
            self.likelihood.append(gpytorch.likelihoods.GaussianLikelihood())
            y_train = y_train[:, 0]
            self.model.append(Single_task_GP_model(x_train, y_train, self.likelihood[block_number], self.kernel))
        if nb_tasks > 1:  # several tasks in the block: multitask GP
            self.likelihood.append(gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=nb_tasks))
            self.model.append(Multitask_GP_Model(x_train, y_train, self.likelihood[block_number], nb_tasks, self.kernel))
        self.model[block_number].train()
        self.likelihood[block_number].train()
        optimizer = torch.optim.Adam([{'params': self.model[block_number].parameters()}], lr=self.learning_rate)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood[block_number], self.model[block_number])
        loss_list_cur = []
        plot_frequency = max(self.n_training_iter // 10, 1)  # avoid a zero modulus for short runs
        if smart_end:
            loss_hist = 0
        for i in range(self.n_training_iter):
            optimizer.zero_grad()
            output = self.model[block_number](x_train)
            loss = -mll(output, y_train)
            if i > 120 and smart_end:
                # Early stopping: break once the loss increase between iterations
                # exceeds the largest improvement seen early in training
                min_loss_variation = np.min(np.array(loss_list_cur[1:30]) - np.array(loss_list_cur[0:29]))
                if loss - loss_hist > -min_loss_variation:
                    break
                else:
                    loss.backward()
                    optimizer.step()
                    if i % plot_frequency == 0:
                        print('Iter %d/%d - Loss: %.3f' % (i + 1, self.n_training_iter, loss.item()))
                    loss_list_cur.append(loss.item())
            else:
                loss.backward()
                optimizer.step()
                if i % plot_frequency == 0:
                    print('Iter %d/%d - Loss: %.3f' % (i + 1, self.n_training_iter, loss.item()))
                loss_list_cur.append(loss.item())
            loss_hist = loss.item()
        self.loss_list.append(loss_list_cur)

    def build_and_train_block_models(self, x_train, y_train, smart_end=False):
        '''
        :param x_train: array size nb_timesteps * 1, represents time
        :param y_train: array size nb_timesteps * nb_tasks
        :return: trains the multiple MGPs, one for each block
        '''
        for i in range(self.number_of_block):
            print('### BLOCK %d ###' % i)
            self.build_and_train_single_model(x_train, y_train[:, self.block_indices[i]], i, smart_end)

    def test_block_model(self, x_test):
        '''
        :param x_test: array size nb_timesteps_test * 1, represents time
        :return: test_mean_list: the means of the posterior MGPs
                 test_covar_matrix_list: the posterior covariance matrices
                 test_std: the standard deviations of the MGPs
        BE CAREFUL: the outputs are lists, each block has its own mean/covariance arrays
        '''
        test_mean_list = []
        test_covar_matrix_list = []
        test_std = []
        for i in range(self.number_of_block):
            self.model[i].eval()
            self.likelihood[i].eval()
            with torch.no_grad(), gpytorch.settings.fast_pred_var():
                test_observed_pred = self.likelihood[i](self.model[i](x_test))
                test_mean = test_observed_pred.mean.detach().numpy()
                test_covar_matrix = self.model[i].return_covar_matrix(x_test).detach().numpy()
                test_mean_list.append(test_mean)
                test_covar_matrix_list.append(test_covar_matrix)
                test_lower, test_upper = test_observed_pred.confidence_region()  # 95% confidence interval
                test_lower, test_upper = test_lower.detach().numpy(), test_upper.detach().numpy()
                # Convert the 95% confidence interval (mean +/- 1.96 std) back to a std
                test_std.append((test_upper - test_lower) / (2 * 1.96))
        return test_mean_list, test_covar_matrix_list, test_std

    def plot_model(self, x_train, y_train, x_test, train_filter):
        '''
        :param x_train: array size nb_timesteps * 1, represents time
        :param y_train: array size nb_timesteps * nb_tasks
        :param x_test: array size nb_timesteps_test * 1, represents time
        :param train_filter: indices of the selected points for the training
        :return: a plot of the losses, the covariance matrices and the regression for each block
        '''
        test_mean_list, test_covar_matrix_list, test_std_deviation = self.test_block_model(x_test)
        fig = plt.figure(figsize=(18.5, 9))
        gs = GridSpec(2, max(self.total_nb_tasks, 2 * self.number_of_block))
        col = 0  # current subplot column
        for j in range(self.number_of_block):
            if len(self.block_indices[j]) == 1:  # single GP
                ax = fig.add_subplot(gs[0, col])
                ax.plot(x_test.detach().numpy(), test_mean_list[j])
                ax.fill_between(x_test.detach().numpy(),
                                test_mean_list[j] + test_std_deviation[j],
                                test_mean_list[j] - test_std_deviation[j], alpha=0.3)
                ax.set_title('Block %d Level %d' % (j, self.block_indices[j][0]))
                ax.plot(x_train.detach().numpy(), y_train.detach().numpy()[:, self.block_indices[j]], color='tomato')
                ax.plot(x_train.detach().numpy()[train_filter],
                        y_train.detach().numpy()[train_filter, self.block_indices[j][0]], '*', color='red')
                ax.axvline(x_train.shape[0] / x_test.shape[0], color='green')
                col = col + 1
            else:  # MGP
                for i in range(len(self.block_indices[j])):
                    ax = fig.add_subplot(gs[0, col])
                    ax.plot(x_test.detach().numpy(), test_mean_list[j][:, i])
                    ax.fill_between(x_test.detach().numpy(),
                                    test_mean_list[j][:, i] + test_std_deviation[j][:, i],
                                    test_mean_list[j][:, i] - test_std_deviation[j][:, i], alpha=0.3)
                    ax.set_title('Block %d Level %d' % (j, self.block_indices[j][i]))
                    ax.plot(x_train.detach().numpy(), y_train.detach().numpy()[:, self.block_indices[j][i]], color='tomato')
                    ax.plot(x_train.detach().numpy()[train_filter],
                            y_train.detach().numpy()[train_filter, self.block_indices[j][i]], '*', color='red')
                    ax.axvline(x_train.shape[0] / x_test.shape[0], color='green')
                    col = col + 1
        for j in range(self.number_of_block):
            nb_tasks = len(self.block_indices[j])
            if nb_tasks == 1:  # single GP
                ax1 = fig.add_subplot(gs[1, 2 * j])
                ax1.imshow(test_covar_matrix_list[j])
                ax1.set_title('Block %d Covar Matrix' % j)
            if nb_tasks > 1:  # multi GP
                ax1 = fig.add_subplot(gs[1, 2 * j])
                matrix = change_representation_covariance_matrix(test_covar_matrix_list[j], nb_tasks)
                ax1.imshow(matrix)
                ax1.set_title('Block %d Covar Matrix' % j)
            ax2 = fig.add_subplot(gs[1, 2 * j + 1])
            ax2.plot(self.loss_list[j])
            ax2.set_title('Block %d Loss' % j)
        plt.show()


def train_Block_MGP_multiple_individuals(x_train, y_train, x_test, y_test, block_indices,
                                         kernel, learning_rate, n_iter,
                                         train_sample_subset=np.array([]),
                                         main_dir='unknown_dir', exec_type='unknown_exec',
                                         train_sampling_type='unknown_sampling',
                                         activate_plot=False, smart_end=False):
    '''
    :param x_train: array size nb_timesteps * 1, represents time
    :param y_train: array size nb_individuals * nb_timesteps * number_tasks
    :param block_indices: list of lists of indices (ex: [[0,1],[2,3],[4]])
    :param x_test: array size nb_timesteps_test * 1, represents time
    :param y_test: array size nb_individuals * nb_timesteps_test * number_tasks
    :param activate_plot: to plot for each individual the resulting regressions, losses...
    :return: the path of the saved h5 file and the predicted values (same size as y_test) at x_test
    BE CAREFUL: x_train and x_test must be the same for all the individuals
    '''
    flat_block_indices = [item for sublist in block_indices for item in sublist]
    y_predicted = np.nan * np.ones(y_test.numpy().shape)
    # Remap each block index to its position in the flattened index list
    a = []
    for i in range(len(block_indices)):
        a.append([])
        for j in range(len(block_indices[i])):
            a[i].append(flat_block_indices.index(block_indices[i][j]))
    block_indices = a
    if len(x_train.shape) > 1:
        raise ValueError('Wrong dimensions for the input x_train, x_train should be a 1D vector')
    if len(x_test.shape) > 1:
        raise ValueError('Wrong dimensions for the input x_test, x_test should be a 1D vector')
    if x_train.shape[0] != y_train.shape[1]:
        raise ValueError('Number of time steps is different for x_train and y_train')
    flat_indices = [item for sublist in block_indices for item in sublist]
    nb_individuals, _, nb_tasks = y_train.shape
    if max(flat_indices) >= nb_tasks:  # valid task indices run from 0 to nb_tasks - 1
        raise ValueError('One of the block indices is higher than the number of tasks in y_train')
    list_means = []
    list_covariance_matrix = []
    for i in range(nb_individuals):
        # Training subset?
        if train_sample_subset.size == 0:
            this_train_sample_subset = np.arange(x_train.shape[0])  # default: use all points
        elif len(train_sample_subset.shape) == 1:
            this_train_sample_subset = train_sample_subset  # same subset for all individuals
        elif len(train_sample_subset.shape) == 2:
            this_train_sample_subset = train_sample_subset[i]  # one subset per individual
        else:
            raise ValueError('Error with train_sample_subset.shape={}'.format(train_sample_subset.shape))
        # Just use the subset for training
        x_train_cur = x_train[this_train_sample_subset]
        y_train_cur = y_train[i, this_train_sample_subset]
        print('########### INDIVIDUAL %d ###########' % i)
        # Define and train
        mgp = Block_MGP(kernel, learning_rate, n_iter, block_indices)
        mgp.build_and_train_block_models(x_train_cur, y_train_cur, smart_end)
        # Plot if desired
        if activate_plot:
            mgp.plot_model(x_train, y_train[i], x_test, train_filter=this_train_sample_subset)
        # Predict for this individual
        test_mean_list, test_covar_matrix_list, _ = mgp.test_block_model(x_test)
        list_means.append(test_mean_list)
        list_covariance_matrix.append(test_covar_matrix_list)
        # Keep the predicted mean
        for k in range(len(block_indices)):
            y_predicted[i, :, block_indices[k]] = test_mean_list[k].T
    # Save dataset
    h5_dataset_path = '{}/{}/trained_models/MGP{}blocks_{}.h5'.format(main_dir, exec_type, len(block_indices), train_sampling_type)
    os.makedirs(os.path.dirname(h5_dataset_path), exist_ok=True)  # make sure the output directory exists
    h5_dataset = h5py.File(h5_dataset_path, 'w')
    # Per block
    for i in range(len(block_indices)):
        cur_mean = np.array([list_means[j][i] for j in range(y_train.shape[0])])
        cur_covariance = np.array([list_covariance_matrix[j][i] for j in range(y_train.shape[0])])
        h5_dataset.create_dataset('mean_block_%d' % i, data=cur_mean)
        h5_dataset.create_dataset('covar_block_%d' % i, data=cur_covariance)
    # All predictions
    h5_dataset.create_dataset('y_predicted', data=y_predicted)
    h5_dataset.close()
    return h5_dataset_path, y_predicted


# Making sure the main program is not executed when the module is imported
if __name__ == '__main__':
    ...
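
For orientation, here is a minimal sketch of how this class might be driven. It assumes MGP.py and the MGP_subclasses / data_processing modules it imports are on the path; the toy data, the kernel choice and the hyperparameter values are all hypothetical, so treat this as illustrative rather than a definitive recipe:

import torch
import gpytorch
from MGP import Block_MGP  # assumes the module above is importable

# Toy data: 100 time steps, 3 tasks, grouped into two blocks
x_train = torch.linspace(0, 1, 100)
y_train = torch.stack([torch.sin(6 * x_train),
                       torch.cos(6 * x_train),
                       x_train], dim=-1)

# Hypothetical kernel; any gpytorch kernel accepted by the model
# classes in MGP_subclasses should work here
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

mgp = Block_MGP(kernel, learning_rate=0.1, n_training_iter=200,
                block_indices=[[0, 1], [2]])
mgp.build_and_train_block_models(x_train, y_train)

# One list entry per block: means, covariance matrices, standard deviations
x_test = torch.linspace(0, 1.2, 120)
test_mean_list, test_covar_list, test_std_deviation = mgp.test_block_model(x_test)

# e.g. a +/- 1 std band around the mean of the single-task block
band_lower = test_mean_list[1] - test_std_deviation[1]
band_upper = test_mean_list[1] + test_std_deviation[1]

A note on the conversion inside test_block_model: gpytorch's confidence_region() returns the mean plus/minus two standard deviations, so dividing the interval width by 4 would recover the standard deviation exactly, while dividing by 2 * 1.96 treats it as a Gaussian 95% interval and overestimates it by about 2%.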


Learning_Curve.py

Source: Learning_Curve.py (GitHub)


from Cross_Validation import Cross_Validation as crv
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import numpy as np
import time


class Learning_Curve:
    """
    This class contains utility methods to compute and plot learning curves
    """

    """ Takes the following parameters as an input:
            learner: a SKLearn Classifier (SKLearn Classifier)
            k: number of iterations of Cross-Validation (float)
            examples: the Bag of Words (sparse matrix of integers)
            labels: labels for each sample (list of integers)
            sizes: sizes to be tested (list of floats between 0 and 1)
        Returns:
            train_sizes: the tested sizes (list of integers)
            train_scores: the scores achieved on the train set (matrix of floats between 0 and 100)
            test_scores: the scores achieved on the test set (matrix of floats between 0 and 100)
    """
    @staticmethod
    def Learning_Curve(learner, k, examples, labels, sizes, type="shuffle"):
        train_scores = []
        test_scores = []
        train_sizes = []
        examples, labels = shuffle(examples, labels, random_state=int(time.time()))
        for s in sizes:
            size = int(s * examples.shape[0])  # slice bounds must be integers
            data_slice, labels_slice = shuffle(examples[:size], labels[:size], random_state=int(time.time()))
            if type == "shuffle":
                score = crv.Shuffle_Cross_Validation(learner, k, data_slice, labels_slice, 0.1)
            elif type == "k-fold":
                score = crv.K_Fold_Cross_Validation(learner, k, data_slice, labels_slice)
            else:
                raise ValueError("Unknown cross-validation type: " + type)
            train_scores.append(score[0])
            test_scores.append(score[1])
            train_sizes.append(size)
        return train_sizes, train_scores, test_scores

    """ Takes the following parameters as an input:
            train_sizes: the tested sizes (list of integers)
            avg_train_scores: the scores achieved on the train set for each size (list of floats between 0 and 100)
            avg_test_scores: the scores achieved on the test set for each size (list of floats between 0 and 100)
            test_std_deviation: the standard deviation on the test set for each size (list of floats)
            train_std_deviation: the standard deviation on the train set for each size (list of floats)
            name: the chosen name (string)
        Returns:
            Nothing.
            Plots the learning curve with the given data
    """
    @staticmethod
    def plot_curve(train_size, avg_test_scores, avg_train_scores, test_std_deviation, train_std_deviation, name):
        print(name)
        for i in zip(train_size, avg_test_scores):
            print(i)
        for j in zip(train_size, avg_train_scores):
            print(j)
        plt.figure()
        plt.plot(train_size, avg_test_scores, 'o-', label=name + " Test", color='blue')
        plt.plot(train_size, avg_train_scores, 'o-', label=name + " Train", color='green')
        # Shaded bands: average score +/- one standard deviation for each set
        plt.fill_between(train_size, [x[0] + x[1] for x in zip(avg_test_scores, test_std_deviation)],
                         [x[0] - x[1] for x in zip(avg_test_scores, test_std_deviation)], color='blue', alpha=0.2)
        plt.fill_between(train_size, [x[0] + x[1] for x in zip(avg_train_scores, train_std_deviation)],
                         [x[0] - x[1] for x in zip(avg_train_scores, train_std_deviation)], color='green', alpha=0.2)
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=1, ncol=1, mode="expand", borderaxespad=0.)
        plt.axis([0, 20000, 0.25, 1.0])
        plt.xticks(range(0, 20000, 1000))
        plt.yticks(np.arange(0.30, 1.0, 0.1))
        for k in range(0, len(train_size)):
            plt.text(train_size[k], avg_test_scores[k], str(round(avg_test_scores[k], 4)))
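
plot_curve expects per-size averages plus standard deviations, while Learning_Curve collects one score entry per tested size. A minimal sketch of the aggregation step in between, assuming each entry holds one score per cross-validation fold (the numbers below are made up):

import numpy as np

# Hypothetical raw scores: one row per training size, one column per CV fold
test_scores = np.array([[0.61, 0.64, 0.60],
                        [0.70, 0.72, 0.69],
                        [0.78, 0.80, 0.79]])
train_sizes = [1000, 5000, 10000]

avg_test_scores = test_scores.mean(axis=1)    # the curve plot_curve draws
test_std_deviation = test_scores.std(axis=1)  # the half-width of the shaded band

print(avg_test_scores)      # approx. [0.617 0.703 0.79]
print(test_std_deviation)

The same reduction applied to the train-set scores yields avg_train_scores and train_std_deviation, completing the arguments plot_curve takes alongside train_sizes and a series name.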


