How to use the query_objects method in autotest

Best Python code snippet using autotest_python

DeformableTransformer-checkpoint.py

Source: DeformableTransformer-checkpoint.py (GitHub)


import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor

from .MultiheadAttention import DeformableHeadAttention, generate_ref_points


class Transformer(nn.Module):
    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False,
                 scales=4,
                 k=4,
                 last_height=16,
                 last_width=16):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(C=d_model,
                                                M=nhead,
                                                K=k,
                                                n_levels=scales,
                                                last_feat_height=last_height,
                                                last_feat_width=last_width,
                                                d_ffn=dim_feedforward,
                                                dropout=dropout,
                                                normalize_before=normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = TransformerDecoderLayer(C=d_model,
                                                M=nhead,
                                                K=k,
                                                n_levels=scales,
                                                last_feat_height=last_height,
                                                last_feat_width=last_width,
                                                d_ffn=dim_feedforward,
                                                dropout=dropout,
                                                normalize_before=normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead
        self.query_ref_point_proj = nn.Linear(d_model, 2)

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src: List[Tensor],
                masks: List[Tensor],
                query_embed,
                pos_embeds: List[Tensor]):
        bs = src[0].size(0)
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        # B, C, H, W -> B, H, W, C
        for index in range(len(src)):
            src[index] = src[index].permute(0, 2, 3, 1)
            pos_embeds[index] = pos_embeds[index].permute(0, 2, 3, 1)
        # B, H, W, C
        ref_points = []
        for tensor in src:
            _, height, width, _ = tensor.shape
            ref_point = generate_ref_points(width=width,
                                            height=height)
            ref_point = ref_point.type_as(src[0])
            # H, W, 2 -> B, H, W, 2
            ref_point = ref_point.unsqueeze(0).repeat(bs, 1, 1, 1)
            ref_points.append(ref_point)
        tgt = torch.zeros_like(query_embed)
        # List[B, H, W, C]
        memory = self.encoder(src,
                              ref_points,
                              padding_mask=masks,
                              pos_encodings=pos_embeds)
        # L, B, C
        query_ref_point = self.query_ref_point_proj(tgt)
        query_ref_point = F.sigmoid(query_ref_point)
        # Decoder layers, L, B, C
        hs = self.decoder(tgt, memory,
                          query_ref_point,
                          memory_key_padding_masks=masks,
                          positional_embeddings=pos_embeds,
                          query_pos=query_embed)
        return hs, query_ref_point, memory


class TransformerEncoder(nn.Module):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = nn.ModuleList([copy.deepcopy(encoder_layer) for i in range(num_layers)])
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, input_features, ref_points, input_masks=None, pos_encodings=None, padding_mask=None):
        output = input_features
        for layer in self.layers:
            # each layer consumes the previous layer's list of per-level feature maps
            output = layer(output, ref_points, input_masks=input_masks,
                           padding_masks=padding_mask, pos_encodings=pos_encodings)
        if self.norm is not None:
            output = [self.norm(feat) for feat in output]
        return output


# Decoder
class TransformerDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = nn.ModuleList([copy.deepcopy(decoder_layer) for i in range(num_layers)])
        self.num_layers = num_layers
        self.return_intermediate = return_intermediate
        self.norm = norm

    def forward(self, query_objects, out_encoder,
                ref_point,
                tgt_mask=None,
                memory_masks=None,
                tgt_key_padding_mask=None,
                memory_key_padding_masks=None,
                positional_embeddings=None,
                query_pos=None):
        # input of the decoder layers
        output = query_objects
        intermediate = []
        for layer in self.layers:
            output = layer(output, out_encoder,
                           ref_point,
                           tgt_mask=tgt_mask,
                           memory_masks=memory_masks,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_masks=memory_key_padding_masks,
                           positional_embeddings=positional_embeddings,
                           query_poses=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return output


class FeedForward(nn.Module):
    def __init__(self, C=256, d_ffn=1024, dropout=0.1):
        super(FeedForward, self).__init__()
        self.C = C
        self.d_ffn = d_ffn
        self.linear1 = nn.Linear(C, d_ffn)
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, C)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, attended):
        attended_tmp = self.linear2(self.dropout1(F.relu(self.linear1(attended))))
        attended = attended + self.dropout2(attended_tmp)
        return attended


class TransformerEncoderLayer(nn.Module):
    def __init__(self, C, M, K, n_levels, last_feat_height, last_feat_width, need_attn=False, d_ffn=2048,
                 dropout=0.1, normalize_before=False):
        super().__init__()
        """
        Args:
            - C: dimension of the embeddings
            - d_ffn: feed forward network dim
            - n_levels: multiscale parameter
            - M: number of attention heads
            - K: number of sampling points
        """
        # self attention
        self.self_attn = DeformableHeadAttention(last_height=last_feat_height,
                                                 last_width=last_feat_width,
                                                 C=C, M=M, K=K, L=n_levels,
                                                 dropout=dropout, return_attentions=False)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(C)
        self.norm2 = nn.LayerNorm(C)
        self.norm3 = nn.LayerNorm(C)
        self.normalize_before = normalize_before
        # g_theta
        self.ffn = FeedForward(C, d_ffn, dropout)

    def forward_pre_norm(self, input_features,
                         ref_points,
                         input_masks=None,
                         padding_masks=None,
                         pos_encodings=None):
        if input_masks is None:
            input_masks = [None] * len(input_features)
        if padding_masks is None:
            padding_masks = [None] * len(input_features)
        if pos_encodings is None:
            pos_encodings = [None] * len(input_features)
        feats = []
        # add pos encodings to features
        features = [feature + pos for (feature, pos) in zip(input_features, pos_encodings)]
        for q, ref_point, key_padding_mask, pos in zip(features, ref_points, padding_masks, pos_encodings):
            feat = self.norm1(q)  # pre normalization
            feat, attention = self.self_attn(feat, features, ref_point, key_padding_mask, padding_masks)
            q = q + self.dropout1(feat)
            q = self.norm2(q)
            q = self.ffn(q)
            feats.append(q)
        return feats

    def forward_post_norm(self, input_features,
                          ref_points,
                          input_masks=None,
                          padding_masks=None,
                          pos_encodings=None):
        if input_masks is None:
            input_masks = [None] * len(input_features)
        if padding_masks is None:
            padding_masks = [None] * len(input_features)
        if pos_encodings is None:
            pos_encodings = [None] * len(input_features)
        feats = []
        # add pos encodings to features
        features = [feature + pos for (feature, pos) in zip(input_features, pos_encodings)]
        for q, ref_point, key_padding_mask, pos in zip(features, ref_points, padding_masks, pos_encodings):
            feat, attention = self.self_attn(q, features, ref_point, key_padding_mask, padding_masks)
            q = q + self.dropout1(feat)
            q = self.norm1(q)
            q = self.ffn(q)
            q = self.norm2(q)  # post normalization
            feats.append(q)
        return feats

    def forward(self, input_features,
                ref_points,
                input_masks=None,
                padding_masks=None,
                pos_encodings=None):
        if self.normalize_before:
            return self.forward_pre_norm(input_features, ref_points, input_masks, padding_masks, pos_encodings)
        return self.forward_post_norm(input_features, ref_points, input_masks, padding_masks, pos_encodings)


class TransformerDecoderLayer(nn.Module):
    def __init__(self, C,
                 M,
                 K,
                 n_levels,
                 last_feat_height,
                 last_feat_width,
                 d_ffn=1024,
                 dropout=0.1,
                 normalize_before=False):
        super().__init__()
        """
        Args:
            - C: dimension of the embeddings
            - d_ffn: feed forward network dim
            - n_levels: multiscale parameter
            - M: number of attention heads
            - K: number of sampling points
        """
        # Deformable Attention part
        self.def_attn = DeformableHeadAttention(last_height=last_feat_height,
                                                last_width=last_feat_width,
                                                C=C, M=M, K=K, L=n_levels,
                                                dropout=dropout, return_attentions=False)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(C)
        self.norm2 = nn.LayerNorm(C)
        self.norm3 = nn.LayerNorm(C)
        # Proper Attention Part
        self.self_attn = nn.MultiheadAttention(C, M, dropout=dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.normalize_before = normalize_before
        # the feed forward network
        self.ffn = FeedForward(C, d_ffn)

    def forward(self, query_objects, out_encoder,
                ref_points,
                tgt_mask=None,
                memory_masks=None,
                tgt_key_padding_mask=None,
                memory_key_padding_masks=None,
                positional_embeddings=None,
                query_poses=None):
        if self.normalize_before:
            return self.forward_pre_norm(query_objects, out_encoder, ref_points, tgt_mask,
                                         memory_masks, tgt_key_padding_mask,
                                         memory_key_padding_masks, positional_embeddings, query_poses)
        return self.forward_post_norm(query_objects, out_encoder, ref_points, tgt_mask,
                                      memory_masks, tgt_key_padding_mask,
                                      memory_key_padding_masks, positional_embeddings, query_poses)

    def forward_post_norm(self, query_objects, out_encoder,
                          ref_points,
                          tgt_mask=None,
                          memory_masks=None,
                          tgt_key_padding_mask=None,
                          memory_key_padding_masks=None,
                          positional_embeddings=None,
                          query_poses=None):
        """
        Args:
            - query_objects: query embedding passed to the transformer (tgt)
            - out_encoder: result of the encoder (memory)
            - ref_points: linear projection of tgt to 2 dim (from the encoder side)
            - memory_key_padding_masks: masks passed to the transformer
            - positional_embeddings: positional embeddings passed to the transformer
            - query_poses: query_embed passed to the transformer
        """
        # self attention
        q = query_objects + query_poses
        k = q
        query_objects_2 = self.self_attn(q, k, value=query_objects, attn_mask=tgt_mask,
                                         key_padding_mask=tgt_key_padding_mask)[0]
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm1(query_objects)
        # get the output of the encoder with positional embeddings
        out_encoder = [tensor + pos for tensor, pos in zip(out_encoder, positional_embeddings)]
        # query_objects has the same shape as nn.Embedding(number of object queries, C)
        # L, B, C -> B, L, 1, C | L: number of object queries, B: size of batch
        query_objects = query_objects.transpose(0, 1).unsqueeze(dim=2)
        # B, L, 1, 2
        ref_points = ref_points.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2, attention_weights = self.def_attn(query_objects, out_encoder, ref_points,
                                                           query_mask=None, x_masks=memory_key_padding_masks)
        # if self.need_attn: self.attns.append(attention_weights)
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm2(query_objects)
        query_objects = self.ffn(query_objects)
        query_objects = self.norm3(query_objects)  # post normalization
        # B, L, 1, C -> L, B, C
        query_objects = query_objects.squeeze(dim=2)
        query_objects = query_objects.transpose(0, 1).contiguous()
        return query_objects

    def forward_pre_norm(self, query_objects, out_encoder,
                         ref_points,
                         tgt_mask=None,
                         memory_masks=None,
                         tgt_key_padding_mask=None,
                         memory_key_padding_masks=None,
                         positional_embeddings=None,
                         query_poses=None):
        """
        Args:
            - query_objects: query embedding passed to the transformer
            - out_encoder: result of the encoder
            - ref_points: linear projection of tgt to 2 dim (from the encoder side)
            - memory_key_padding_masks: masks passed to the transformer
            - positional_embeddings: positional embeddings passed to the transformer
            - query_poses: query_embed passed to the transformer
        """
        # self attention
        query_objects_2 = self.norm1(query_objects)
        q = query_objects_2 + query_poses
        k = q
        query_objects_2 = self.self_attn(q, k, value=query_objects, attn_mask=tgt_mask,
                                         key_padding_mask=tgt_key_padding_mask)[0]
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects_2 = self.norm2(query_objects)  # pre normalization for the cross-attention
        # get the output of the encoder with positional embeddings
        out_encoder = [tensor + pos for tensor, pos in zip(out_encoder, positional_embeddings)]
        # query_objects has the same shape as nn.Embedding(number of object queries, C)
        # L, B, C -> B, L, 1, C | L: number of object queries, B: size of batch
        query_objects = query_objects.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2 = query_objects_2.transpose(0, 1).unsqueeze(dim=2)
        # B, L, 1, 2
        ref_points = ref_points.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2, attention_weights = self.def_attn(query_objects_2, out_encoder, ref_points,
                                                           query_mask=None, x_masks=memory_key_padding_masks)
        # if self.need_attn: self.attns.append(attention_weights)
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm3(query_objects)
        query_objects = self.ffn(query_objects)
        # B, L, 1, C -> L, B, C
        query_objects = query_objects.squeeze(dim=2)
        query_objects = query_objects.transpose(0, 1).contiguous()
        ...
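To see how query_objects comes into being before it ever reaches the decoder, the query-preparation steps of Transformer.forward can be reproduced with plain PyTorch. This is only an illustrative sketch: the sizes (num_queries, d_model, bs) are made up for the example, and the project's DeformableHeadAttention / generate_ref_points module is not needed for this part.

import torch
from torch import nn

# Illustrative sizes; the snippet's default is d_model=512, and the queries
# normally come from an nn.Embedding owned by the detection model.
num_queries, d_model, bs = 100, 512, 2

# query_embed as handed to Transformer.forward: one row per object query.
query_embed = nn.Embedding(num_queries, d_model).weight      # L, C
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)      # L, B, C

# tgt (what the decoder receives as query_objects) starts as zeros of the same shape.
tgt = torch.zeros_like(query_embed)                          # L, B, C

# The initial reference points are a sigmoid-squashed linear projection of tgt.
query_ref_point_proj = nn.Linear(d_model, 2)
query_ref_point = torch.sigmoid(query_ref_point_proj(tgt))   # L, B, 2

print(tgt.shape, query_ref_point.shape)
# torch.Size([100, 2, 512]) torch.Size([100, 2, 2])

The resulting tgt tensor is exactly what self.decoder receives as query_objects, together with the encoder memory and the sigmoid-normalized reference points.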



decoder.py

Source: decoder.py (GitHub)


import torch
import torch.nn.functional as F
from torch import nn, Tensor
from .MultiHeadAttention import DeformableHeadAttention
import copy


class DeformableTransformerDecoderLayer(nn.Module):
    def __init__(self, C, M, K, n_levels, last_feat_height, last_feat_width, d_ffn=1024, dropout=0.1, normalize_before=False):
        super().__init__()
        """
        Args:
            - C: number of expected features in the decoder inputs.
            - d_ffn: feed forward network dimension.
            - n_levels: multiscale parameter.
            - M: number of attention heads.
            - K: number of sampling points.
        """
        # Deformable Attention part
        self.def_attn = DeformableHeadAttention(last_height=last_feat_height,
                                                last_width=last_feat_width,
                                                C=C, M=M, K=K, L=n_levels,
                                                dropout=dropout, return_attentions=False)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(C)
        self.norm2 = nn.LayerNorm(C)
        self.norm3 = nn.LayerNorm(C)
        # Proper Attention Part
        self.self_attn = nn.MultiheadAttention(C, M, dropout=dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.normalize_before = normalize_before
        # the feed forward network
        self.ffn = FeedForward(C, d_ffn)

    def forward(self, query_objects, out_encoder, ref_points, tgt_mask=None, memory_masks=None,
                tgt_key_padding_mask=None, memory_key_padding_masks=None, positional_embeddings=None,
                query_poses=None):
        """
        Args:
            - query_objects: query embedding passed to the transformer.
            - out_encoder: result of the encoder.
            - ref_points: linear projection of tgt to 2 dim (from the encoder side).
            - memory_key_padding_masks: the mask passed to the transformer.
            - tgt_key_padding_mask: the mask for target keys per batch.
            - positional_embeddings: positional embeddings passed to the transformer.
            - query_poses: query_embed passed to the transformer.
        """
        if self.normalize_before:
            return self.forward_pre_norm(query_objects, out_encoder, ref_points, tgt_mask,
                                         memory_masks, tgt_key_padding_mask,
                                         memory_key_padding_masks, positional_embeddings, query_poses)
        return self.forward_post_norm(query_objects, out_encoder, ref_points, tgt_mask,
                                      memory_masks, tgt_key_padding_mask,
                                      memory_key_padding_masks, positional_embeddings, query_poses)

    def forward_post_norm(self, query_objects, out_encoder, ref_points, tgt_mask=None, memory_masks=None,
                          tgt_key_padding_mask=None, memory_key_padding_masks=None, positional_embeddings=None,
                          query_poses=None):
        # self attention
        q = query_objects + query_poses
        k = q
        query_objects_2 = self.self_attn(q, k, value=query_objects, attn_mask=tgt_mask,
                                         key_padding_mask=tgt_key_padding_mask)[0]
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm1(query_objects)
        # get the output of the encoder with positional embeddings
        out_encoder = [tensor + pos for tensor, pos in zip(out_encoder, positional_embeddings)]
        # query_objects has the same shape as nn.Embedding(number of object queries, C)
        # L, B, C -> B, L, 1, C | L: number of object queries, B: size of batch
        query_objects = query_objects.transpose(0, 1).unsqueeze(dim=2)
        # B, L, 1, 2
        ref_points = ref_points.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2, attention_weights = self.def_attn(query_objects, out_encoder, ref_points,
                                                           query_mask=None, x_masks=memory_key_padding_masks)
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm2(query_objects)
        query_objects = self.ffn(query_objects)
        query_objects = self.norm3(query_objects)  # post normalization
        # B, L, 1, C -> L, B, C
        query_objects = query_objects.squeeze(dim=2)
        query_objects = query_objects.transpose(0, 1).contiguous()
        return query_objects

    def forward_pre_norm(self, query_objects, out_encoder, ref_points, tgt_mask=None, memory_masks=None,
                         tgt_key_padding_mask=None, memory_key_padding_masks=None, positional_embeddings=None,
                         query_poses=None):
        # self attention
        query_objects_2 = self.norm1(query_objects)
        q = query_objects_2 + query_poses
        k = q
        query_objects_2 = self.self_attn(q, k, value=query_objects, attn_mask=tgt_mask,
                                         key_padding_mask=tgt_key_padding_mask)[0]
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects_2 = self.norm2(query_objects)  # pre normalization for the cross-attention
        # get the output of the encoder with positional embeddings
        out_encoder = [tensor + pos for tensor, pos in zip(out_encoder, positional_embeddings)]
        # query_objects has the same shape as nn.Embedding(number of object queries, C)
        # L, B, C -> B, L, 1, C | L: number of object queries, B: size of batch
        query_objects = query_objects.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2 = query_objects_2.transpose(0, 1).unsqueeze(dim=2)
        # B, L, 1, 2
        ref_points = ref_points.transpose(0, 1).unsqueeze(dim=2)
        query_objects_2, attention_weights = self.def_attn(query_objects_2, out_encoder, ref_points,
                                                           query_mask=None, x_masks=memory_key_padding_masks)
        query_objects = query_objects + self.dropout2(query_objects_2)
        query_objects = self.norm3(query_objects)
        query_objects = self.ffn(query_objects)
        # B, L, 1, C -> L, B, C
        query_objects = query_objects.squeeze(dim=2)
        query_objects = query_objects.transpose(0, 1).contiguous()
        return query_objects


class DeformableTransformerDecoder(nn.Module):

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        """
        Args:
            - decoder_layer: an instance of the DeformableTransformerDecoderLayer() class.
            - num_layers: the number of sub-decoder-layers in the decoder.
            - norm: the layer normalization component (optional).
        """
        super().__init__()
        self.layers = nn.ModuleList([copy.deepcopy(decoder_layer) for i in range(num_layers)])
        self.num_layers = num_layers
        self.return_intermediate = return_intermediate
        self.norm = norm

    def forward(self, query_objects, out_encoder, ref_point, tgt_mask=None, memory_masks=None,
                tgt_key_padding_mask=None, memory_key_padding_masks=None, positional_embeddings=None,
                query_pos=None):
        # input of the decoder layers
        output = query_objects
        intermediate = []
        for layer in self.layers:
            output = layer(output, out_encoder, ref_point, tgt_mask=tgt_mask, memory_masks=memory_masks,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_masks=memory_key_padding_masks,
                           positional_embeddings=positional_embeddings, query_poses=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return output


class FeedForward(nn.Module):
    """Simple Feed Forward Network"""

    def __init__(self, C=256, d_ffn=1024, dropout=0.5):
        super(FeedForward, self).__init__()
        self.C = C
        self.d_ffn = d_ffn
        self.linear1 = nn.Linear(C, d_ffn)
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, C)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, attended):
        attended_tmp = self.linear2(self.dropout1(F.relu(self.linear1(attended))))
        attended = attended + self.dropout2(attended_tmp)
        ...
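The shape bookkeeping around query_objects is the part that most often trips people up, so here is a small, self-contained sketch of one post-norm decoder step using only standard PyTorch. The deformable cross-attention call itself is left out because DeformableHeadAttention comes from the project's local MultiHeadAttention module; all sizes below (L, B, C, M) are illustrative, not taken from the snippet.

import torch
from torch import nn

L, B, C, M = 100, 2, 256, 8             # object queries, batch, channels, heads (illustrative)

query_objects = torch.zeros(L, B, C)    # decoder input (tgt)
query_poses = torch.randn(L, B, C)      # query positional embedding
ref_points = torch.rand(L, B, 2)        # reference points in [0, 1]

# "Proper" self-attention over the object queries, as in forward_post_norm.
self_attn = nn.MultiheadAttention(C, M, dropout=0.1)
norm1 = nn.LayerNorm(C)

q = k = query_objects + query_poses
query_objects_2 = self_attn(q, k, value=query_objects)[0]
query_objects = norm1(query_objects + query_objects_2)

# Reshape for the deformable cross-attention:
# L, B, C -> B, L, 1, C and L, B, 2 -> B, L, 1, 2.
query_objects = query_objects.transpose(0, 1).unsqueeze(dim=2)
ref_points = ref_points.transpose(0, 1).unsqueeze(dim=2)
print(query_objects.shape, ref_points.shape)
# torch.Size([2, 100, 1, 256]) torch.Size([2, 100, 1, 2])

# ... self.def_attn(query_objects, out_encoder, ref_points, ...) would run here ...

# And back: B, L, 1, C -> L, B, C before returning to the decoder loop.
query_objects = query_objects.squeeze(dim=2).transpose(0, 1).contiguous()

After the final squeeze/transpose, the layer hands an L, B, C tensor back, which is why DeformableTransformerDecoder can simply loop over its layers and keep feeding output into the next layer unchanged.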


