How to use the test_test_labels method in autotest

Best Python code snippets using autotest_python
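The projects below define test_test_labels as an ordinary unittest or pytest test over a dataset's test-split labels, so the pattern is easy to reproduce on its own. As a minimal, hedged sketch of that pattern (FakeMnistData and its get_test_labels method are illustrative stand-ins, not part of autotest or of any project quoted below):

# Hypothetical sketch of a unittest-style test_test_labels method.
# FakeMnistData and get_test_labels() are assumed helpers, not a real autotest API.
import unittest
import numpy as np


class FakeMnistData:
    """Illustrative stand-in exposing MNIST-shaped test labels."""

    def get_test_labels(self):
        return np.zeros((10000,), dtype=np.int64)


class TestLabels(unittest.TestCase):
    def setUp(self):
        self.d = FakeMnistData()

    def test_test_labels(self):
        # The test labels should be a flat vector with one entry per test image.
        y_l = self.d.get_test_labels()
        self.assertEqual((10000,), y_l.shape)


if __name__ == "__main__":
    unittest.main()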

TriangleConv_relation.py

Source: TriangleConv_relation.py (GitHub)


import torch
import torch.nn as nn
import os
import numpy as np
import re
import random
import math
from torch.optim import lr_scheduler
from collections import OrderedDict
from torch.autograd import Variable
import warnings
warnings.filterwarnings("ignore")
import torch.nn.functional as F
import torch.utils.data as data
from get_sample_object import getdata
from new_test_dataset import Model10DataSet

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

"""
dirpath: str, the path of the directory
"""

def conv_bn_block(input, output, kernel_size):
    # 1-D convolution followed by batch norm and ReLU.
    return nn.Sequential(
        nn.Conv1d(input, output, kernel_size),
        nn.BatchNorm1d(output),
        nn.ReLU(inplace=True)
    )

def fc_bn_block(input, output):
    # Fully connected layer followed by batch norm and ReLU.
    return nn.Sequential(
        nn.Linear(input, output),
        nn.BatchNorm1d(output),
        nn.ReLU(inplace=True)
    )

class TriangleConv(nn.Module):
    def __init__(self, layers):
        super(TriangleConv, self).__init__()
        self.layers = layers
        mlp_layers = OrderedDict()
        for i in range(len(self.layers) - 1):
            if i == 0:
                mlp_layers['conv_bn_block_{}'.format(i + 1)] = conv_bn_block(4 * self.layers[i], self.layers[i + 1], 1)
            else:
                mlp_layers['conv_bn_block_{}'.format(i + 1)] = conv_bn_block(self.layers[i], self.layers[i + 1], 1)
        self.mlp = nn.Sequential(mlp_layers)

    def forward(self, X):
        B, N, F = X.shape
        # Pair each point with its two neighbours along the (cyclic) contour.
        k_indexes = []
        for i in range(N):
            if i == 0:
                k_indexes.append([N - 1, i + 1])
            elif i == N - 1:
                k_indexes.append([i - 1, 0])
            else:
                k_indexes.append([i - 1, i + 1])
        k_indexes_tensor = torch.Tensor(k_indexes)
        k_indexes_tensor = k_indexes_tensor.long()
        x1 = torch.zeros(B, N, 2, F).to(device)
        for idx, x in enumerate(X):
            x1[idx] = x[k_indexes_tensor]
        x2 = X.reshape([B, N, 1, F]).float()
        x2 = x2.expand(B, N, 2, F)
        x2 = x2 - x1
        x3 = x2[:, :, 0:1, :]
        x4 = x2[:, :, 1:2, :]
        x4 = x3 - x4
        x5 = X.reshape([B, N, 1, F]).float()
        x2 = x2.reshape([B, N, 1, 2 * F])
        # Concatenate the point, its edge vectors and their difference into one feature.
        x_triangle = torch.cat([x5, x2, x4], dim=3)
        x_triangle = torch.squeeze(x_triangle)
        x_triangle = x_triangle.permute(0, 2, 1)
        x_triangle = x_triangle.float().to(device)
        out = self.mlp(x_triangle)
        out = out.permute(0, 2, 1)
        return out

class DPCN_vanilla(nn.Module):
    def __init__(self):
        super(DPCN_vanilla, self).__init__()
        # self.num_classes = num_classes
        self.triangleconv_1 = TriangleConv(layers=[2, 64, 64, 64])
        self.triangleconv_2 = TriangleConv(layers=[64, 1024, 64])
        # self.fc_block_4 = fc_bn_block(1024, 512)
        # self.drop_4 = nn.Dropout(0.5)
        # self.fc_block_5 = fc_bn_block(512, 256)
        # self.drop_5 = nn.Dropout(0.5)
        # self.fc_6 = nn.Linear(256, 64)
        # self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        B, N, C = x.shape
        assert C == 2, 'dimension of x does not match'
        x = self.triangleconv_1(x)
        x = self.triangleconv_2(x)
        x = x.permute(0, 2, 1)
        x = nn.MaxPool1d(N)(x)  # global max pooling over the N points
        x = x.reshape([B, -1])
        return x

class RelationNetwork(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(RelationNetwork, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv1d(2, 128, kernel_size=3),
            nn.BatchNorm1d(128, momentum=0.9, affine=True),
            nn.LeakyReLU(),
            nn.MaxPool1d(2))
        self.layer2 = nn.Sequential(
            nn.Conv1d(128, 64, kernel_size=3),
            nn.BatchNorm1d(64, momentum=0.9, affine=True),
            nn.LeakyReLU(),
            nn.MaxPool1d(2))
        self.fc1 = nn.Linear(896, 8)  # 896
        self.fc2 = nn.Linear(8, 1)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = torch.sigmoid(self.fc2(out))
        return out

class_number = 2
support_number = 5
support_train_shot = 5
support_test_shot = 15

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        n = m.kernel_size[0] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm') != -1:
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        n = m.weight.size(1)
        m.weight.data.normal_(0, 0.01)
        m.bias.data = torch.ones(m.bias.data.size())

def new_getdataset(support_data=None, val_data=None, index=None, train_shuffle=True, test_shuffle=True, train1=True):
    # Sample a new episode: class_number classes, each with a support (train) and query (test) split.
    support_train_dataset_list = []
    support_test_dataset_list = []
    label_number = 0
    if train1 == True:
        per_task_data = random.sample(support_data, class_number)
    else:
        per_task_data = random.sample(val_data, class_number)
    for i in per_task_data:
        support_train_perclass_dataset = Model10DataSet(train=True, train_xy_reshape=i, index=index,
                                                        shot=support_train_shot, test_shot=support_test_shot,
                                                        label_number=label_number)
        support_train_dataset_list.append(support_train_perclass_dataset)
        support_test_perclass_dataset = Model10DataSet(train=False, train_xy_reshape=i, index=index,
                                                       shot=support_train_shot, test_shot=support_test_shot,
                                                       label_number=label_number)
        support_test_dataset_list.append(support_test_perclass_dataset)
        label_number = label_number + 1
    train_dataset_concat = data.ConcatDataset(support_train_dataset_list)
    test_dataset_concat = data.ConcatDataset(support_test_dataset_list)
    train_loader = torch.utils.data.DataLoader(train_dataset_concat, batch_size=class_number * support_train_shot,
                                               shuffle=train_shuffle, num_workers=0)
    test_loader = torch.utils.data.DataLoader(test_dataset_concat, batch_size=class_number * support_test_shot,
                                              shuffle=test_shuffle, num_workers=0)
    support_train, support_train_labels = next(iter(train_loader))  # 25, 25
    # print(support_train.shape)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    support_train, support_train_labels = support_train.to(device), support_train_labels.to(device)
    support_test, support_test_labels = next(iter(test_loader))  # 75, 75
    support_test, support_test_labels = support_test.to(device), support_test_labels.to(device)
    # print(type(train_loader))
    return support_train, support_train_labels, support_test, support_test_labels

def main():
    print("init data:")
    all_train_data, index = getdata()
    all_train_data = np.array(all_train_data)
    print("This is all_train_data:", all_train_data.shape)
    support_data = all_train_data[0:support_number, :, :, :].tolist()
    val_data = all_train_data[support_number:, :, :, :].tolist()
    print("all_train_data:", np.array(all_train_data).shape)
    print("support_data:", np.array(support_data).shape)
    print("val_data:", np.array(val_data).shape)
    print("init network")
    model = DPCN_vanilla().cuda()
    relation_network = RelationNetwork(16, 8)
    relation_network.apply(weights_init)
    relation_network.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    schedular = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.5)
    relation_network_optim = torch.optim.Adam(relation_network.parameters(), lr=0.001)
    relation_network_scheduler = lr_scheduler.StepLR(relation_network_optim, step_size=10000, gamma=0.5)
    print("Training~~~~~")
    best_accuracy = 0.0
    for episode in range(20000):
        schedular.step()
        relation_network_scheduler.step()
        model.train()
        relation_network.train()
        support_train, support_train_labels, support_test, support_test_labels = new_getdataset(
            support_data=support_data, index=index, val_data=val_data,
            train_shuffle=False, test_shuffle=True, train1=True)
        out_support_train = model(support_train)
        out_support_train_2 = out_support_train.view(class_number, support_train_shot, 1, 64)
        out_support_train_3 = torch.sum(out_support_train_2, 1).squeeze(1)
        out_support_train_repeat = out_support_train_3.unsqueeze(0).repeat(class_number * support_test_shot, 1, 1)
        out_support_test = model(support_test)
        out_support_test_repeat = out_support_test.unsqueeze(0).repeat(class_number, 1, 1)
        out_support_test_repeat_transpose = torch.transpose(out_support_test_repeat, 0, 1)
        # Pair every query embedding with every class prototype and score the pairs.
        relation_pairs = torch.cat((out_support_train_repeat, out_support_test_repeat_transpose), 2).view(-1, 1 * 2, 64)
        relations = relation_network(relation_pairs).view(-1, class_number)
        mse = nn.MSELoss().cuda()
        input_zero = torch.zeros(support_test_shot * class_number, class_number).cuda()
        support_test_labels = torch.squeeze(support_test_labels, dim=-1)
        input_scatter = input_zero.scatter_(1, support_test_labels.long().view(-1, 1), 1)
        one_hot_labels = Variable(input_scatter).cuda()
        loss = mse(relations, one_hot_labels)
        optimizer.zero_grad()
        relation_network_optim.zero_grad()
        loss.backward()
        optimizer.step()
        relation_network_optim.step()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        torch.nn.utils.clip_grad_norm_(relation_network.parameters(), 0.5)
        if (episode + 1) % 100 == 0:
            print("episode:", episode + 1, "loss", loss.item())
        if (episode + 1) % 500 == 0:
            print("Testing")
            total_rewards = 0
            model.eval()
            relation_network.eval()
            for i in range(300):
                test_train, test_train_labels, test_test, test_test_labels = new_getdataset(
                    support_data=support_data, val_data=val_data, index=index,
                    train_shuffle=False, test_shuffle=True, train1=False)
                out_test_train = model(test_train)
                out_test_train_2 = out_test_train.view(class_number, support_train_shot, 1, 64)
                out_test_train_3 = torch.sum(out_test_train_2, 1).squeeze(1)
                out_test_train_repeat = out_test_train_3.unsqueeze(0).repeat(class_number * support_test_shot, 1, 1)
                out_test_test = model(test_test)
                out_test_test_repeat = out_test_test.unsqueeze(0).repeat(class_number, 1, 1)
                out_test_test_repeat_transpose = torch.transpose(out_test_test_repeat, 0, 1)
                relation_pairs = torch.cat((out_test_train_repeat, out_test_test_repeat_transpose), 2).view(-1, 1 * 2, 64)
                relations = relation_network(relation_pairs).view(-1, class_number)
                _, predict_labels = torch.max(relations.data, 1)
                predict_labels = predict_labels.cpu().int()
                test_test_labels = test_test_labels.cpu().int()
                rewards = [1 if predict_labels[j] == test_test_labels[j] else 0
                           for j in range(class_number * support_test_shot)]
                total_rewards += np.sum(rewards)
            test_accuracy = total_rewards / 1.0 / class_number / support_test_shot / 300
            print("test accuracy:", test_accuracy)
            if test_accuracy > best_accuracy:
                # save networks
                # torch.save(model.state_dict(), str("./save_models/new_relation/model_" + str(class_number) + "way_" + str(support_test_shot) + "shot.pkl"))
                # torch.save(relation_network.state_dict(), str("./save_models/new_relation/relation_network_" + str(class_number) + "way_" + str(support_test_shot) + "shot.pkl"))
                # print("save networks for episode:", episode + 1)
                best_accuracy = test_accuracy
            model.train()
            relation_network.train()
    print("best_accuracy:", best_accuracy)
    return best_accuracy

if __name__ == '__main__':
    ...
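In this snippet, test_test_labels is simply the tensor of ground-truth query labels for one evaluation episode: relation scores are regressed onto one-hot targets built with scatter_, and accuracy is the fraction of argmax predictions that match those labels. A self-contained sketch of just that labelling and scoring step (the random relations tensor is a placeholder standing in for the relation network's output):

# Hypothetical standalone sketch of the one-hot target / accuracy step used above.
import torch

class_number, support_test_shot = 2, 15

# Placeholder relation scores and ground-truth query labels (test_test_labels above).
relations = torch.rand(class_number * support_test_shot, class_number)
test_test_labels = torch.randint(0, class_number, (class_number * support_test_shot,))

# One-hot targets built with scatter_, as in the training loop.
one_hot_labels = torch.zeros(class_number * support_test_shot, class_number)
one_hot_labels.scatter_(1, test_test_labels.view(-1, 1), 1)

loss = torch.nn.MSELoss()(relations, one_hot_labels)

# Accuracy: argmax over relation scores compared with the ground-truth labels.
_, predict_labels = torch.max(relations, 1)
accuracy = (predict_labels == test_test_labels).float().mean().item()
print("loss:", loss.item(), "accuracy:", accuracy)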


test_mnist.py

Source: test_mnist.py (GitHub)


...
        self.assertEqual((60000,), y.shape)

    def test_test_data(self):
        x_l = self.d.get_test_data()
        self.assertEqual((10000, 28, 28), x_l.shape)

    def test_test_labels(self):
        y_l = self.d.get_test_labels()
        self.assertEqual((10000,), y_l.shape)

    def test_multi_log_reg(self):
        # Reduced because we want the tests to finish a bit faster.
        train_count = 15000
        test_count = 5000
        # Train data
        X = self.sds.from_numpy(self.d.get_train_data().reshape(
            (60000, 28 * 28))[:train_count])
        Y = self.sds.from_numpy(self.d.get_train_labels()[:train_count])
        Y = Y + 1.0
        # Test data
        Xt = self.sds.from_numpy(self.d.get_test_data().reshape(
            (10000, 28 * 28))[:test_count])
...
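Here test_test_labels is a regular unittest method, so it can be run on its own once the module is importable. A small hedged sketch of invoking just that method programmatically (the TestCase class name TestMnist is an assumption; substitute whatever class test_mnist.py actually defines):

# Hypothetical: load and run only test_test_labels from test_mnist.py.
# "TestMnist" is an assumed class name, not confirmed by the snippet above.
import unittest

suite = unittest.TestLoader().loadTestsFromName("test_mnist.TestMnist.test_test_labels")
unittest.TextTestRunner(verbosity=2).run(suite)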


test_data.py

Source: test_data.py (GitHub)


...
    train_labels = [int(i[1]) for i in train_set]
    numbers = set(range(0, 10))
    assert set(train_labels) == numbers, "All the labels were not found in the training dataset"

@pytest.mark.skipif(not os.path.exists('data/processed/test_tensor.pt'), reason="Data not found")
def test_test_labels():
    test_set = MnistDataset('data/processed/test_tensor.pt')
    numbers = set(range(0, 10))
    test_labels = [int(i[1]) for i in test_set]
...
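This pytest version is truncated before its final assertion, but the training-label check earlier in the same file suggests the shape of it: gather every label in the test split and compare against the full digit set. A self-contained sketch of that idea (the in-memory fake_test_set is an illustrative stand-in for MnistDataset('data/processed/test_tensor.pt')):

# Hypothetical label-coverage check; fake_test_set stands in for the real MnistDataset.
import pytest

fake_test_set = [(None, digit) for digit in range(10)]  # (image, label) pairs

def test_test_labels():
    numbers = set(range(0, 10))
    test_labels = [int(i[1]) for i in fake_test_set]
    assert set(test_labels) == numbers, "All the labels were not found in the test dataset"

if __name__ == "__main__":
    pytest.main([__file__])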


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, which covers everything from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

