How to use the is_pos method in Airtest

Best Python code snippet using Airtest
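
In the snippets below, is_pos is a boolean [N, N] mask marking which sample pairs in a batch share the same label; it is built from the batch targets and then handed to the mining and loss functions. A minimal sketch of that construction, with toy labels assumed purely for illustration:

import torch

# Hypothetical toy labels: a batch of N = 6 samples, three identities with two samples each.
targets = torch.tensor([0, 0, 1, 1, 2, 2])
N = targets.size(0)

# Same construction used in the snippets below:
# is_pos[i, j] is True when samples i and j share a label; is_neg is its complement.
is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t())
is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t())

print(is_pos.shape)   # torch.Size([6, 6])
print(is_pos[0])      # tensor([ True,  True, False, False, False, False])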

triplet_loss.py

Source: triplet_loss.py (GitHub)


# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import torch
import torch.nn.functional as F

from fastreid.utils import comm
from fastreid.layers import GatherLayer

from .utils import concat_all_gather, euclidean_dist, normalize


def softmax_weights(dist, mask):
    max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
    diff = dist - max_v
    Z = torch.sum(torch.exp(diff) * mask, dim=1, keepdim=True) + 1e-6  # avoid division by zero
    W = torch.exp(diff) * mask / Z
    return W


def hard_example_mining(dist_mat, is_pos, is_neg):
    """For each anchor, find the hardest positive and negative sample.
    Args:
        dist_mat: pair wise distance between samples, shape [N, M]
        is_pos: positive index with shape [N, M]
        is_neg: negative index with shape [N, M]
    Returns:
        dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
        dist_an: pytorch Variable, distance(anchor, negative); shape [N]
        p_inds: pytorch LongTensor, with shape [N];
            indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
        n_inds: pytorch LongTensor, with shape [N];
            indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    NOTE: Only consider the case in which all labels have same num of samples,
        thus we can cope with all anchors in parallel.
    """
    assert len(dist_mat.size()) == 2
    N = dist_mat.size(0)

    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N, 1]
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)

    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)

    return dist_ap, dist_an


def weighted_example_mining(dist_mat, is_pos, is_neg):
    """For each anchor, find the weighted positive and negative sample.
    Args:
        dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
        is_pos:
        is_neg:
    Returns:
        dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
        dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    """
    assert len(dist_mat.size()) == 2

    is_pos = is_pos.float()
    is_neg = is_neg.float()
    dist_ap = dist_mat * is_pos
    dist_an = dist_mat * is_neg

    weights_ap = softmax_weights(dist_ap, is_pos)
    weights_an = softmax_weights(-dist_an, is_neg)

    dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
    dist_an = torch.sum(dist_an * weights_an, dim=1)

    return dist_ap, dist_an


def triplet_loss(embedding, targets, margin, norm_feat, hard_mining):
    r"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
    Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
    Loss for Person Re-Identification'."""

    if norm_feat: embedding = normalize(embedding, axis=-1)

    # For distributed training, gather all features from different process.
    if comm.get_world_size() > 1:
        all_embedding = torch.cat(GatherLayer.apply(embedding), dim=0)
        all_targets = concat_all_gather(targets)
    else:
        all_embedding = embedding
        all_targets = targets

    dist_mat = euclidean_dist(all_embedding, all_embedding)

    N, N = dist_mat.size()
    is_pos = all_targets.view(N, 1).expand(N, N).eq(all_targets.view(N, 1).expand(N, N).t())
    is_neg = all_targets.view(N, 1).expand(N, N).ne(all_targets.view(N, 1).expand(N, N).t())

    if hard_mining:
        dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
    else:
        dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)

    y = dist_an.new().resize_as_(dist_an).fill_(1)

    if margin > 0:
        loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=margin)
    else:
        loss = F.soft_margin_loss(dist_an - dist_ap, y)
        # fmt: off
        if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
        # fmt: on
...
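
A minimal usage sketch for the mining helpers above, assuming the functions from this snippet are available in the current namespace. The toy labels, the embedding size, and the use of torch.cdist as a stand-in for fastreid's euclidean_dist are all assumptions for illustration:

import torch

# Hypothetical toy batch: 4 samples, two identities with two samples each.
targets = torch.tensor([0, 0, 1, 1])
embedding = torch.randn(4, 128)

# Pairwise Euclidean distances (stand-in for fastreid's euclidean_dist helper).
dist_mat = torch.cdist(embedding, embedding)

N = dist_mat.size(0)
is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t())
is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t())

# Hardest positive / negative distance per anchor, as defined above.
dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
print(dist_ap.shape, dist_an.shape)  # torch.Size([4]) torch.Size([4])

Note that hard_example_mining relies on every identity contributing the same number of samples to the batch, so that dist_mat[is_pos].view(N, -1) keeps a rectangular shape.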

pairwise.py

Source: pairwise.py (GitHub)


import torch
from torch import nn
from torch.nn import functional as F


class CosFacePairLoss(nn.Module):
    def __init__(self, s=30, m=0.50):
        super(CosFacePairLoss, self).__init__()
        self.s = s
        self.m = m

    def forward(self, embedding, targets):
        embedding = F.normalize(embedding, p=2, dim=1)
        # if torch.distributed.is_initialized():
        #     embedding = AllGather(embedding)
        #     targets = AllGather(targets)
        dist_mat = torch.matmul(embedding, embedding.t())
        N = dist_mat.size(0)
        is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
        is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

        # Mask scores related to itself
        is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

        s_p = dist_mat * is_pos
        s_n = dist_mat * is_neg

        logit_p = -self.s * s_p + (-99999999.0) * (1 - is_pos)
        logit_n = self.s * (s_n + self.m) + (-99999999.0) * (1 - is_neg)

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()

        return loss


class CosFacePairLoss_v2(nn.Module):
    def __init__(self, s=30, m_max=0.6, m_min=0.2, m=0):
        super(CosFacePairLoss_v2, self).__init__()
        self.s = s
        self.m_max = m_max
        self.m_min = m_min

    def forward(self, embedding, targets):
        embedding = F.normalize(embedding, p=2, dim=1)
        # if torch.distributed.is_initialized():
        #     embedding = AllGather(embedding)
        #     targets = AllGather(targets)
        dist_mat = torch.matmul(embedding, embedding.t())
        N = dist_mat.size(0)
        mask = is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
        is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

        # Mask scores related to itself
        is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

        p_simi = (dist_mat * is_pos).sum(1) / is_pos.sum(1)
        p_simi = p_simi.mean()
        m = (self.m_max - self.m_min) * p_simi + self.m_min
        mask_hard_neg = ((p_simi - m) < dist_mat) * (1 - mask)
        scale_matrix = self.s * (1 - mask_hard_neg) + self.s * mask_hard_neg * (p_simi + self.m_max - self.m_min)

        s_p = dist_mat * is_pos
        s_n = dist_mat * is_neg

        logit_p = -scale_matrix * s_p + (-99999999.0) * (1 - is_pos)
        logit_n = scale_matrix * (s_n + m) + (-99999999.0) * (1 - is_neg)

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()

        return loss


class CirclePairLoss(nn.Module):
    def __init__(self, s=30, m=0.30):
        super(CirclePairLoss, self).__init__()
        self.s = s
        self.m = m

    def forward(self, embedding, targets):
        embedding = F.normalize(embedding, p=2, dim=1)
        # if torch.distributed.is_initialized():
        #     embedding = AllGather(embedding)
        #     targets = AllGather(targets)
        dist_mat = torch.matmul(embedding, embedding.t())
        N = dist_mat.size(0)
        is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
        is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

        # Mask scores related to itself
        is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

        s_p = dist_mat * is_pos
        s_n = dist_mat * is_neg

        alpha_p = torch.clamp_min(-s_p.detach() + 1 + self.m, min=0.)
        alpha_n = torch.clamp_min(s_n.detach() + self.m, min=0.)
        delta_p = 1 - self.m
        delta_n = self.m

        logit_p = - self.s * alpha_p * (s_p - delta_p) + (-99999999.) * (1 - is_pos)
        logit_n = self.s * alpha_n * (s_n - delta_n) + (-99999999.) * (1 - is_neg)

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()
...
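
A quick usage sketch for the pair losses above. The class name comes from the snippet; the batch size, embedding dimension, label values, and the chosen s/m hyperparameters are assumptions for illustration. Here is_pos and is_neg are built inside forward() from the targets, with the diagonal removed from is_pos so each sample's similarity to itself is excluded from the positive logits:

import torch

criterion = CosFacePairLoss(s=30, m=0.35)  # illustrative hyperparameters

embedding = torch.randn(8, 256)            # raw features; forward() L2-normalizes them
targets = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])

loss = criterion(embedding, targets)
print(loss.item())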

multisoftmax.py

Source: multisoftmax.py (GitHub)


import torch
from torch import nn
import torch.nn.functional as F

eps = 1e-7


class NCECriterion(nn.Module):
    """
    Eq. (12): L_{NCE}
    """
    def __init__(self, n_data):
        super(NCECriterion, self).__init__()
        self.n_data = n_data

    def forward(self, x):
        bsz = x.shape[0]
        m = x.size(1) - 1

        # noise distribution
        Pn = 1 / float(self.n_data)

        # loss for positive pair
        P_pos = x.select(1, 0)
        log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()

        # loss for K negative pair
        P_neg = x.narrow(1, 1, m)
        log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()

        loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz

        return loss


class NCESoftmaxLoss(nn.Module):
    """Softmax cross-entropy loss (a.k.a., info-NCE loss in CPC paper)"""
    def __init__(self):
        super(NCESoftmaxLoss, self).__init__()
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x):
        bsz = x.shape[0]
        x = x.squeeze()
        label = torch.zeros([bsz]).cuda().long()
        loss = self.criterion(x, label)
        return loss


class MultiSoftmaxLoss(nn.Module):
    def __init__(self):
        super().__init__()
        # self.criterion = nn.KLDivLoss(reduction='batchmean')
        self.criterion = nn.CrossEntropyLoss()
        # self.criterion = nn.NLLLoss(reduction='mean')

    def forward(self, x, is_pos):
        bsz = x.shape[0]
        # ce_loss = self.criterion(x, torch.zeros([bsz]).cuda().long())
        x = x.squeeze()
        x = torch.exp(x)
        is_pos = torch.cat((torch.ones([bsz, 1], dtype=torch.long).cuda(), is_pos.long()), dim=1)
        is_neg = (1 - is_pos).float()

        neg_div = (x * is_neg).sum(dim=1, keepdim=True)
        x_logit = x / (x + neg_div)
        x_logit = -torch.log(x_logit)
        x_mask = x_logit * is_pos.float()
        num_pos = is_pos.sum(dim=1, keepdim=True).float()
        x_mask = x_mask / num_pos
        loss = x_mask.sum(dim=1).mean(dim=0)
        return loss

        # loss = 0
        # for i in range(bsz):
        #     tmp_loss = 0
        #     pos_inds = torch.where(is_pos[i] == 1)[0].tolist()
        #     num_pos = len(pos_inds)
        #     for j in pos_inds:
        #         tmp_loss -= torch.log(x[i, j] / (neg_div[i][0] + x[i, j]))
        #     loss += (tmp_loss / num_pos)
        # loss = loss / bsz
        #
        # print(loss)
        # print(fast_loss)
        # from ipdb import set_trace; set_trace()
        # print(ce_loss)
        # print(loss)

    # def forward(self, x, is_pos):
    #     is_pos = is_pos.float()
    #     bsz = x.shape[0]
    #     x = x.squeeze()
    #
    #     label = torch.zeros([bsz]).cuda().long()
    #     # loss = self.criterion1(x, ce_label)
    #
    #     # from ipdb import set_trace; set_trace()
    #     # is_neg = 1 - is_pos[:, 1:]
    #     x = F.softmax(x, dim=1)
    #     x = (x * is_pos).sum(dim=1, keepdim=True)
    #     # neg_logit = (x * is_neg)
    #     # x = torch.cat((pos_logit, x[:, 1:]), dim=1)  # [bsz, 16385]
    #     # x = torch.log(x)
    #
    #     loss = self.criterion(x.log(), label)
    #     return loss

    # x = F.softmax(x, dim=1)
    # label = torch.cat((torch.ones([bsz, 1], dtype=torch.float32).cuda(), is_pos), dim=1)  # (bsz, dim)
    # label = F.softmax(label, dim=1)
    # label = label / label.sum(dim=1, keepdim=True)
    # loss = torch.sum(x * torch.log(1e-9 + x / (label + 1e-9)), dim=1).mean(dim=0)
    # loss = torch.sum(x * (1e-9 + torch.log(x) - torch.log(label + 1e-9)), dim=1).mean(dim=0)
    # from ipdb import set_trace; set_trace()
    # loss = self.criterion(x, label)
...
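
A usage sketch for MultiSoftmaxLoss above, assuming a CUDA device is available (the snippet hard-codes .cuda() when building its masks). Here x holds similarity scores whose first column is the known positive, and the is_pos argument flags any additional positives among the remaining columns; the shapes and values below are purely illustrative:

import torch

criterion = MultiSoftmaxLoss()

bsz, K = 4, 7                        # hypothetical batch size and number of comparison scores
x = torch.randn(bsz, 1 + K).cuda()   # column 0: score against the known positive
is_pos = torch.zeros(bsz, K).cuda()  # extra-positive mask for columns 1..K
is_pos[:, 0] = 1                     # mark one additional positive per row (illustrative)

loss = criterion(x, is_pos)
print(loss.item())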

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test, through best practices, to advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Airtest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
