How to use the max_children method in Hypothesis

Best Python code snippets using hypothesis
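Note: in the snippets below, max_children is not a Hypothesis strategy or API call; it is simply the size of the third dimension of the children index tensor in two PyTorch implementations of a Tree-Based Convolutional Neural Network (TBCNN). A minimal sketch of how it is read off (the concrete sizes here are illustrative, not taken from the snippets):

import torch

# `children` holds, for every node of every tree in the batch, the indices of
# its children, zero-padded up to the widest node; the tensor has shape
# (batch_size x max_tree_size x max_children).
children = torch.zeros((4, 50, 8), dtype=torch.long)

max_children = children.shape[2]  # -> 8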

model.py

Source: model.py (GitHub)


import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from .function_util import tensordot

torch.set_default_tensor_type('torch.cuda.DoubleTensor')

def tile(a, dim, n_tile, is_cuda):
    init_dim = a.size(dim)
    repeat_idx = [1] * a.dim()
    repeat_idx[dim] = n_tile
    a = a.repeat(*repeat_idx)
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
    if is_cuda:
        order_index = order_index.cuda()
    return torch.index_select(a, dim, order_index)

class ETA_T(nn.Module):
    def __init__(self, opt):
        super(ETA_T, self).__init__()
        self.opt = opt
        self.cuda = False
        if self.opt.cuda:
            self.cuda = True

    def forward(self, children):
        """
        Compute weight matrix for how much each vector belongs to the 'top'.

        This part is tricky: this implementation only slides over a window of depth 2,
        which means a child node in a window always has depth = 1. According to the
        formula in the original paper, the top coefficient in this case is always 0/1 = 1.
        """
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # eta_t is shape (batch_size x max_tree_size x max_children + 1)
        eta = torch.cat((torch.ones((max_tree_size, 1)), torch.zeros((max_tree_size, max_children))), 1)
        eta = eta.unsqueeze(0)
        eta = tile(eta, 0, batch_size, self.cuda)
        return eta

class ETA_R(nn.Module):
    def __init__(self, opt):
        super(ETA_R, self).__init__()
        self.opt = opt
        self.cuda = False
        if self.opt.cuda:
            self.cuda = True

    def forward(self, children, coef_t):
        """Compute weight matrix for how much each vector belongs to the 'right'."""
        # children is (batch_size x max_tree_size x max_children)
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # num_siblings is shape (batch_size x max_tree_size x 1)
        num_siblings = max_children - (children == 0).sum(dim=2, keepdim=True)
        # num_siblings is shape (batch_size x max_tree_size x max_children + 1)
        num_siblings = tile(num_siblings, 2, max_children + 1, self.cuda).double()
        # creates a mask of 1's and 0's where 1 means there is a child there
        # has shape (batch_size x max_tree_size x max_children + 1)
        mask = torch.cat((torch.zeros((batch_size, max_tree_size, 1)).double(),
                          torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).double())), 2)
        # child indices for every tree (batch_size x max_tree_size x max_children + 1)
        child_indices = torch.arange(-1.0, float(max_children), 1.0).double()
        child_indices = child_indices.unsqueeze(0)
        child_indices = child_indices.unsqueeze(0)
        child_indices = tile(child_indices, 0, batch_size, self.cuda)
        child_indices = tile(child_indices, 1, max_tree_size, self.cuda)
        child_indices = torch.mul(child_indices, mask)
        # weights for every tree node in the case that num_siblings = 0
        # shape is (batch_size x max_tree_size x max_children + 1)
        singles = torch.cat((torch.zeros((batch_size, max_tree_size, 1)).double(),
                             torch.tensor((), dtype=torch.double).new_full((batch_size, max_tree_size, 1), 0.5),
                             torch.zeros((batch_size, max_tree_size, max_children - 1)).double()), 2)
        # eta_r is shape (batch_size x max_tree_size x max_children + 1)
        result = torch.where(
            torch.eq(num_siblings, torch.ones((batch_size, max_tree_size, max_children + 1)).double()),
            singles,
            torch.mul((1.0 - coef_t).double(), torch.div(child_indices, num_siblings - 1.0).double())
        )
        return result

class ETA_L(nn.Module):
    def __init__(self, opt):
        super(ETA_L, self).__init__()
        self.opt = opt
        self.cuda = False
        if self.opt.cuda:
            self.cuda = True

    def forward(self, children, coef_t, coef_r):
        """Compute weight matrix for how much each vector belongs to the 'left'."""
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # creates a mask of 1's and 0's where 1 means there is a child there
        # has shape (batch_size x max_tree_size x max_children + 1)
        mask = torch.cat((
            torch.zeros((batch_size, max_tree_size, 1)).double(),
            torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).double())), 2)
        # eta_l is shape (batch_size x max_tree_size x max_children + 1)
        result = torch.mul(torch.mul((1.0 - coef_t).double(), (1.0 - coef_r).double()).double(), mask)
        return result

class CHILDREN_TENSOR(nn.Module):
    def __init__(self, opt):
        super(CHILDREN_TENSOR, self).__init__()
        self.opt = opt
        self.cuda = False
        if self.opt.cuda:
            self.cuda = True
        self.num_features = opt.num_features

    def forward(self, nodes, children, feature_size):
        # children is (batch_size x num_nodes x max_children)
        batch_size = children.shape[0]
        num_nodes = children.shape[1]
        max_children = children.shape[2]
        # replace the root node with the zero vector so lookups for the 0th
        # vector return 0 instead of the root vector
        # zero_vecs is (batch_size x 1 x num_features)
        zero_vecs = torch.zeros((batch_size, 1, self.num_features)).double()
        # vector_lookup is (batch_size x num_nodes x feature_size)
        vector_lookup = torch.cat((zero_vecs, nodes[:, 1:, :]), 1)
        # children is (batch_size x num_nodes x num_children x 1)
        children = children.unsqueeze(3)
        # prepend the batch indices to the 4th dimension of children
        # batch_indices is (batch_size x 1 x 1 x 1)
        batch_indices = torch.arange(0, batch_size)
        batch_indices = batch_indices.view(batch_size, 1, 1, 1).double()
        # batch_indices is (batch_size x num_nodes x num_children x 1)
        batch_indices = tile(batch_indices, 1, num_nodes, self.cuda)
        batch_indices = tile(batch_indices, 2, max_children, self.cuda)
        # children is (batch_size x num_nodes x num_children x 2)
        children = torch.cat((batch_indices, children), 3).long()
        # output will have shape (batch_size x num_nodes x num_children x feature_size)
        # NOTE: tf < 1.1 contains a bug that makes backprop not work for this!
        result = vector_lookup[children[:, :, :, 0], children[:, :, :, 1], :]
        return result

class CONV_LAYER(nn.Module):
    def __init__(self, opt):
        super(CONV_LAYER, self).__init__()
        self.opt = opt
        self.num_features = opt.num_features
        self.output_size = opt.output_size
        self.conv_node = CONV_NODE(self.opt)

    def forward(self, num_conv, nodes, children, feature_size):
        nodes = [
            self.conv_node(nodes, children, feature_size)
            for _ in range(num_conv)
        ]
        return torch.cat(nodes, 2)

class CONV_NODE(nn.Module):
    def __init__(self, opt):
        super(CONV_NODE, self).__init__()
        self.opt = opt
        self.num_features = opt.num_features
        self.output_size = opt.output_size
        self.conv_step = CONV_STEP(self.opt)

        self.w_t = torch.nn.Parameter(data=torch.Tensor(self.num_features, self.output_size), requires_grad=True)
        self.w_t.data.uniform_(-1, 1)
        self.w_r = torch.nn.Parameter(data=torch.Tensor(self.num_features, self.output_size), requires_grad=True)
        self.w_r.data.uniform_(-1, 1)
        self.w_l = torch.nn.Parameter(data=torch.Tensor(self.num_features, self.output_size), requires_grad=True)
        self.w_l.data.uniform_(-1, 1)
        self.b_conv = torch.nn.Parameter(data=torch.Tensor(self.output_size), requires_grad=True)
        self.b_conv.data.uniform_(-1, 1)
        # self.w_t = torch.randn(self.num_features, self.output_size, requires_grad=True)
        # self.w_l = torch.randn(self.num_features, self.output_size, requires_grad=True)
        # self.w_r = torch.randn(self.num_features, self.output_size, requires_grad=True)
        # self.b_conv = Variable(torch.randn(self.output_size))

    def forward(self, nodes, children, feature_size):
        """Perform convolutions over every batch sample."""
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]

        conv_result = self.conv_step(nodes, children, feature_size, self.w_t, self.w_r, self.w_l, self.b_conv)
        return conv_result

class CONV_STEP(nn.Module):
    def __init__(self, opt):
        super(CONV_STEP, self).__init__()
        self.opt = opt
        self.cuda = False
        if self.opt.cuda:
            self.cuda = True
        self.num_features = opt.num_features
        self.children_tensor = CHILDREN_TENSOR(self.opt)

    def eta_l(self, children, coef_t, coef_r):
        """Compute weight matrix for how much each vector belongs to the 'left'."""
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # creates a mask of 1's and 0's where 1 means there is a child there
        # has shape (batch_size x max_tree_size x max_children + 1)
        mask = torch.cat((
            torch.zeros((batch_size, max_tree_size, 1)).double(),
            torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).double())), 2)
        # eta_l is shape (batch_size x max_tree_size x max_children + 1)
        result = torch.mul(torch.mul((1.0 - coef_t).double(), (1.0 - coef_r).double()).double(), mask)
        return result

    def eta_t(self, children):
        """
        Compute weight matrix for how much each vector belongs to the 'top'.

        This part is tricky: this implementation only slides over a window of depth 2,
        which means a child node in a window always has depth = 1. According to the
        formula in the original paper, the top coefficient in this case is always 0/1 = 1.
        """
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # eta_t is shape (batch_size x max_tree_size x max_children + 1)
        eta = torch.cat((torch.ones((max_tree_size, 1)), torch.zeros((max_tree_size, max_children))), 1)
        eta = eta.unsqueeze(0)
        eta = tile(eta, 0, batch_size, self.cuda)
        return eta

    def eta_r(self, children, coef_t):
        """Compute weight matrix for how much each vector belongs to the 'right'."""
        # children is (batch_size x max_tree_size x max_children)
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # num_siblings is shape (batch_size x max_tree_size x 1)
        num_siblings = max_children - (children == 0).sum(dim=2, keepdim=True)
        # num_siblings is shape (batch_size x max_tree_size x max_children + 1)
        num_siblings = tile(num_siblings, 2, max_children + 1, self.cuda).double()
        # creates a mask of 1's and 0's where 1 means there is a child there
        # has shape (batch_size x max_tree_size x max_children + 1)
        mask = torch.cat((torch.zeros((batch_size, max_tree_size, 1)).double(),
                          torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).double())), 2)
        # child indices for every tree (batch_size x max_tree_size x max_children + 1)
        child_indices = torch.arange(-1.0, float(max_children), 1.0).double()
        child_indices = child_indices.unsqueeze(0)
        child_indices = child_indices.unsqueeze(0)
        child_indices = tile(child_indices, 0, batch_size, self.cuda)
        child_indices = tile(child_indices, 1, max_tree_size, self.cuda)
        child_indices = torch.mul(child_indices, mask)
        # weights for every tree node in the case that num_siblings = 0
        # shape is (batch_size x max_tree_size x max_children + 1)
        singles = torch.cat((torch.zeros((batch_size, max_tree_size, 1)).double(),
                             torch.tensor((), dtype=torch.double).new_full((batch_size, max_tree_size, 1), 0.5),
                             torch.zeros((batch_size, max_tree_size, max_children - 1)).double()), 2)
        # eta_r is shape (batch_size x max_tree_size x max_children + 1)
        result = torch.where(
            torch.eq(num_siblings, 1.0),
            singles,
            torch.mul((1.0 - coef_t).double(), torch.div(child_indices, num_siblings - 1.0).double())
        )
        return result

    def forward(self, nodes, children, feature_size, w_t, w_r, w_l, b_conv):
        """Convolve a batch of nodes and children.

        Lots of high-dimensional tensors in this function. Intuitively it makes
        more sense if we did this work with while loops, but computationally this
        is more efficient. Don't try to wrap your head around all the tensor dot
        products, just follow the trail of dimensions.
        """
        # nodes is shape (batch_size x max_tree_size x feature_size)
        batch_size = children.shape[0]
        max_tree_size = children.shape[1]
        max_children = children.shape[2]
        # children is shape (batch_size x max_tree_size x max_children)
        # children_tensor = CHILDREN_TENSOR(self.max_children, self.batch_size, self.max_tree_size, feature_size, self.opt)
        children_vectors = self.children_tensor(nodes, children, feature_size)
        # add a 4th dimension to the nodes tensor
        nodes = nodes.unsqueeze(2)
        # tree_tensor is shape (batch_size x max_tree_size x max_children + 1 x feature_size)
        tree_tensor = torch.cat((nodes, children_vectors), 2)
        c_t = self.eta_t(children)
        c_r = self.eta_r(children, c_t)
        c_l = self.eta_l(children, c_t, c_r)
        # coef = self.coefficients(children)
        coef = torch.stack((c_t, c_r, c_l), 3).double()
        weights = torch.stack([w_t, w_r, w_l], 0).double()
        # reshape for matrix multiplication
        x = batch_size * max_tree_size
        y = max_children + 1
        result = tree_tensor.view(x, y, feature_size)
        coef = coef.view(x, y, 3)
        result = torch.matmul(torch.transpose(result, 1, 2), coef)
        result = result.view(batch_size, max_tree_size, 3, feature_size)
        # output is (batch_size x max_tree_size x output_size)
        result = tensordot(result, weights, [[2, 3], [0, 1]])
        return torch.tanh(result + b_conv)

class TBCNN(nn.Module):
    """
    Tree-Based Convolutional Neural Network (TBCNN)
    """
    def __init__(self, opt):
        super(TBCNN, self).__init__()
        self.opt = opt
        self.num_features = opt.num_features
        self.output_size = opt.output_size
        self.label_size = opt.n_classes
        self.hidden_layer = nn.Sequential(
            nn.Linear(self.output_size, self.output_size),
            nn.LeakyReLU(),
            nn.Linear(self.output_size, self.label_size),
        )
        self.conv_layer = CONV_LAYER(self.opt)
        self._initialization()

    def pooling_layer(self, nodes):
        return torch.max(nodes, 1)[0]

    def _initialization(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    init.normal_(m.bias.data)

    def forward(self, nodes, children):
        conv = self.conv_layer(1, nodes, children, self.num_features)
        # print(conv)
        # print(conv.shape)
        pooling = self.pooling_layer(conv)
        # print(pooling)
        # print(pooling.shape)
        features = self.hidden_layer(pooling)
        # print(features)...
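A minimal usage sketch for the TBCNN module above. The opt namespace here is hypothetical; its field names (num_features, output_size, n_classes, cuda) are the ones model.py reads, but all concrete sizes are illustrative. Since the file sets the default tensor type to torch.cuda.DoubleTensor, the sketch assumes a CUDA-enabled build:

from types import SimpleNamespace
import torch

# Hypothetical configuration object; the field names mirror what model.py reads.
opt = SimpleNamespace(num_features=30, output_size=100, n_classes=10, cuda=True)
model = TBCNN(opt)

batch_size, max_tree_size, max_children = 4, 50, 8
# Random node embeddings and zero-padded child indices, as doubles to match the model.
nodes = torch.rand(batch_size, max_tree_size, opt.num_features).double()
children = torch.randint(0, max_tree_size, (batch_size, max_tree_size, max_children)).double()

# forward is truncated in the snippet, but hidden_layer maps the pooled vector
# to n_classes, so features should have shape (batch_size x n_classes).
features = model(nodes, children)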


network.py

Source: network.py (GitHub)


import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
# from torchsummary import summary

def truncated_normal_(tensor, mean=0, std=0.09):  # https://zhuanlan.zhihu.com/p/83609874 tf.trunc_normal()
    size = tensor.shape
    tmp = tensor.new_empty(size + (4,)).normal_()
    valid = (tmp < 2) & (tmp > -2)
    ind = valid.max(-1, keepdim=True)[1]
    tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
    return tensor

def th_gather_nd(x, indices):
    newshape = indices.shape[:-1] + x.shape[indices.shape[-1]:]
    indices = indices.view(-1, indices.shape[-1]).tolist()
    out = torch.cat([x.__getitem__(tuple(i)) for i in indices])
    return out.reshape(newshape)

def eta_t(children):
    """Compute weight matrix for how much each vector belongs to the 'top'"""
    # children is shape (batch_size x max_tree_size x max_children)
    batch_size = children.size(0)
    max_tree_size = children.size(1)
    max_children = children.size(2)
    # eta_t is shape (batch_size x max_tree_size x max_children + 1)
    return (torch.unsqueeze(torch.cat(
        [torch.ones((max_tree_size, 1)).to(children.device),
         torch.zeros((max_tree_size, max_children)).to(children.device)],
        dim=1), dim=0,
    )).repeat([batch_size, 1, 1])

def eta_r(children, t_coef):
    """Compute weight matrix for how much each vector belongs to the 'right'"""
    children = children.type(torch.float32)
    batch_size = children.size(0)
    max_tree_size = children.size(1)
    max_children = children.size(2)

    # num_siblings is shape (batch_size x max_tree_size x 1)
    num_siblings = torch.sum((~(children == 0)).int(), dim=2, keepdim=True, dtype=torch.float32)
    # num_siblings is shape (batch_size x max_tree_size x max_children + 1)
    num_siblings = num_siblings.repeat([1, 1, max_children + 1])

    # creates a mask of 1's and 0's where 1 means there is a child there
    # has shape (batch_size x max_tree_size x max_children + 1)
    mask = torch.cat(
        [torch.zeros((batch_size, max_tree_size, 1)).to(children.device),
         torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).to(children.device))],
        dim=2)
    # child indices for every tree (batch_size x max_tree_size x max_children + 1)
    child_indices = torch.mul(
        (torch.unsqueeze(
            torch.unsqueeze(
                # torch.arange(-1.0, max_children.type(torch.float32), 1.0, dtype=torch.float32),
                torch.arange(-1.0, torch.tensor(max_children, dtype=torch.float32), 1.0, dtype=torch.float32),
                dim=0),
            dim=0).repeat([batch_size, max_tree_size, 1])).cuda(),
        mask
    )

    # weights for every tree node in the case that num_siblings = 0
    # shape is (batch_size x max_tree_size x max_children + 1)
    singles = torch.cat(
        [torch.zeros((batch_size, max_tree_size, 1)).to(children.device),
         torch.full((batch_size, max_tree_size, 1), 0.5).to(children.device),
         torch.zeros((batch_size, max_tree_size, max_children - 1)).to(children.device)],
        dim=2)

    # eta_r is shape (batch_size x max_tree_size x max_children + 1)
    return torch.where(
        # torch.equal(num_siblings, 1.0),
        torch.eq(num_siblings, 1.0),
        # avoid division by 0 when num_siblings == 1
        singles,
        # the normal case where num_siblings != 1
        (1.0 - t_coef) * (child_indices / (num_siblings - 1.0))
    )

def eta_l(children, coef_t, coef_r):
    """Compute weight matrix for how much each vector belongs to the 'left'"""
    children = children.type(torch.float32)
    batch_size = children.size(0)
    max_tree_size = children.size(1)
    max_children = children.size(2)

    # creates a mask of 1's and 0's where 1 means there is a child there
    # has shape (batch_size x max_tree_size x max_children + 1)
    mask = torch.cat(
        [torch.zeros((batch_size, max_tree_size, 1)).to(children.device),
         torch.min(children, torch.ones((batch_size, max_tree_size, max_children)).to(children.device))],
        dim=2)

    # eta_l is shape (batch_size x max_tree_size x max_children + 1)
    return torch.mul(
        torch.mul((1.0 - coef_t), (1.0 - coef_r)), mask
    )

def children_tensor(nodes, children, feature_size):
    max_children = torch.tensor(children.size(2)).cuda()
    batch_size = torch.tensor(nodes.size(0)).cuda()
    num_nodes = torch.tensor(nodes.size(1)).cuda()

    # replace the root node with the zero vector so lookups for the 0th
    # vector return 0 instead of the root vector
    # zero_vecs is (batch_size x 1 x feature_size)
    zero_vecs = torch.zeros((batch_size, 1, feature_size)).cuda()
    # vector_lookup is (batch_size x num_nodes x feature_size)
    vector_lookup = torch.cat([zero_vecs, nodes[:, 1:, :]], dim=1)
    # children is (batch_size x num_nodes x num_children x 1)
    children = torch.unsqueeze(children, dim=3)
    # prepend the batch indices to the 4th dimension of children
    # batch_indices is (batch_size x 1 x 1 x 1)
    batch_indices = torch.reshape(torch.arange(0, batch_size), (batch_size, 1, 1, 1)).cuda()
    # batch_indices is (batch_size x num_nodes x num_children x 1)
    batch_indices = batch_indices.repeat([1, num_nodes, max_children, 1])
    # children is (batch_size x num_nodes x num_children x 2)
    children = torch.cat([batch_indices, children], dim=3)
    # output will have shape (batch_size x num_nodes x num_children x feature_size)
    return th_gather_nd(vector_lookup, children)

def conv_step(nodes, children, feature_size, w_t, w_l, w_r, b_conv):
    # nodes is shape (batch_size x max_tree_size x feature_size)
    # children is shape (batch_size x max_tree_size x max_children)

    # children_vectors will have shape
    # (batch_size x max_tree_size x max_children x feature_size)
    children_vectors = children_tensor(nodes, children, feature_size)

    # add a 4th dimension to the nodes tensor
    nodes = torch.unsqueeze(nodes, 2)

    # tree_tensor is shape
    # (batch_size x max_tree_size x max_children + 1 x feature_size)
    tree_tensor = torch.cat([nodes, children_vectors], dim=2)

    # coefficient tensors are shape (batch_size x max_tree_size x max_children + 1)
    c_t = eta_t(children)
    c_r = eta_r(children, c_t)
    c_l = eta_l(children, c_t, c_r)

    # concatenate the position coefficients into a tensor
    # (batch_size x max_tree_size x max_children + 1 x 3)
    coef = torch.stack([c_t, c_r, c_l], dim=3)
    weights = torch.stack([w_t, w_r, w_l], dim=0)

    batch_size = children.size(0)
    max_tree_size = children.size(1)
    max_children = children.size(2)

    # reshape for matrix multiplication
    x = batch_size * max_tree_size
    y = max_children + 1

    result = tree_tensor.reshape(x, y, feature_size)
    coef = coef.reshape(x, y, 3)
    result = torch.matmul(result.transpose(1, 2), coef)
    result = torch.reshape(result, (batch_size, max_tree_size, 3, feature_size))

    result = torch.tensordot(result, weights, [[2, 3], [0, 1]])

    return torch.tanh(result + b_conv)

def pool_layer(nodes):
    """Creates a max dynamic pooling layer from the nodes."""
    pooled = torch.max(nodes, 1)
    return pooled.values

class TBCNN(nn.Module):
    def __init__(self, feature_size, label_size, conv_feature, w_t, w_l, w_r, b_conv, w_h, b_h):
        super(TBCNN, self).__init__()

        self.feature_size = feature_size
        self.label_size = label_size
        self.conv_feature = conv_feature

        self.w_t = torch.nn.Parameter(w_t)
        self.w_l = torch.nn.Parameter(w_l)
        self.w_r = torch.nn.Parameter(w_r)
        self.b_conv = torch.nn.Parameter(b_conv)
        self.w_h = torch.nn.Parameter(w_h)
        self.b_h = torch.nn.Parameter(b_h)

    def hidden_layer(self, pooled):
        return torch.tanh(torch.matmul(pooled, self.w_h) + self.b_h)

    def forward(self, nodes, children):
        nodes = torch.tensor(nodes)
        children = torch.tensor(children)
        conv = [
            conv_step(nodes, children, self.feature_size, self.w_t, self.w_l, self.w_r, self.b_conv)
            for _ in range(1)
        ]
        conv = torch.cat(conv, dim=2)
        pooling = pool_layer(conv)
        hidden = self.hidden_layer(pooling)
        # hidden = torch.tanh(torch.matmul(pooling, weights) + bias)
        out = torch.softmax(hidden, dim=-1)
        return out
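A minimal usage sketch for the functional TBCNN variant above, again with illustrative sizes only. The parameter tensors are built with the snippet's own truncated_normal_ helper; note that children_tensor and eta_r call .cuda() internally, so this variant also assumes a CUDA device:

import torch

feature_size, conv_feature, label_size = 30, 100, 10

# One weight matrix per position (top/left/right), shaped as conv_step expects.
w_t = truncated_normal_(torch.empty(feature_size, conv_feature))
w_l = truncated_normal_(torch.empty(feature_size, conv_feature))
w_r = truncated_normal_(torch.empty(feature_size, conv_feature))
b_conv = torch.zeros(conv_feature)
# Hidden (classification) layer parameters consumed by hidden_layer.
w_h = truncated_normal_(torch.empty(conv_feature, label_size))
b_h = torch.zeros(label_size)

net = TBCNN(feature_size, label_size, conv_feature, w_t, w_l, w_r, b_conv, w_h, b_h).cuda()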


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

