How to use the window_size method in Airtest

Best Python code snippets using window_size, indexed alongside Airtest projects
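Note that the snippets below come from open-source Python projects that define or consume a window_size parameter; they are not calls into the Airtest API itself. If what you actually need inside an Airtest script is the size of the screen under test, a minimal sketch follows. It assumes an Android device attached over ADB; the "Android:///" URI and the get_current_resolution() call apply to Airtest's Android device class specifically, so treat other platforms as untested here.

# Minimal sketch: query the device resolution from an Airtest script.
# Assumes exactly one Android device is visible to ADB.
from airtest.core.api import connect_device

dev = connect_device("Android:///")           # connect to the first ADB device
width, height = dev.get_current_resolution()  # (w, h), adjusted for rotation
print("screen size:", width, height)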

SwinT.py

Source: SwinT.py (GitHub)


import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import to_2tuple, trunc_normal_


def default_conv(in_channels, out_channels, kernel_size, bias=True):
    return nn.Conv2d(
        in_channels, out_channels, kernel_size,
        padding=(kernel_size // 2), bias=bias)


class SwinT(nn.Module):
    def __init__(
            # self, conv, n_feats, kernel_size,
            # bias=True, bn=False, act=nn.ReLU(True)):
            self, n_feats=50):
        super(SwinT, self).__init__()
        m = []
        depth = 2
        num_heads = 5
        window_size = 8
        resolution = 64
        mlp_ratio = 2.0
        m.append(BasicLayer(dim=n_feats,
                            depth=depth,
                            resolution=resolution,
                            num_heads=num_heads,
                            window_size=window_size,
                            mlp_ratio=mlp_ratio,
                            qkv_bias=True, qk_scale=None,
                            norm_layer=nn.LayerNorm))
        self.transformer_body = nn.Sequential(*m)

    def forward(self, x):
        res = self.transformer_body(x)
        return res


class BasicLayer(nn.Module):
    def __init__(self, dim, resolution, embed_dim=50, depth=2, num_heads=8, window_size=8,
                 mlp_ratio=1., qkv_bias=True, qk_scale=None, norm_layer=None):
        super().__init__()
        self.dim = dim
        self.resolution = resolution
        self.depth = depth
        self.window_size = window_size
        # build blocks
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, resolution=resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        self.patch_embed = PatchEmbed(
            embed_dim=dim, norm_layer=norm_layer)
        self.patch_unembed = PatchUnEmbed(embed_dim=dim)

    def check_image_size(self, x):
        # pad H and W up to the next multiple of window_size
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        if mod_pad_h != 0 or mod_pad_w != 0:
            x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x, h, w

    def forward(self, x):
        x, h, w = self.check_image_size(x)
        _, _, H, W = x.size()
        x_size = (H, W)
        x = self.patch_embed(x)
        for blk in self.blocks:
            x = blk(x, x_size)
        x = self.patch_unembed(x, x_size)
        if h != H or w != W:
            # crop the padding back off
            x = x[:, :, 0:h, 0:w].contiguous()
        return x


class SwinTransformerBlock(nn.Module):
    def __init__(self, dim, resolution, num_heads, window_size=8, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.resolution = to_2tuple(resolution)
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        # if min(self.input_resolution) <= self.window_size:
        #     # if window size is larger than input resolution, we don't partition windows
        #     self.shift_size = 0
        #     self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
        # self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale)
        # self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)
        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        return attn_mask

    def forward(self, x, x_size):
        H, W = x_size
        B, L, C = x.shape
        shortcut = x
        # x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (recompute the mask so testing works on images whose sizes
        # are not multiples of the training resolution)
        if self.resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        else:
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + x
        # x = x + self.mlp(self.norm2(x))
        x = x + self.mlp(x)
        return x


class WindowAttention(nn.Module):
    def __init__(self, dim, window_size, num_heads,
                 qkv_bias=True,
                 qk_scale=None):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        return x


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x


def window_partition(x, window_size):
    # split a (B, H, W, C) map into non-overlapping window_size x window_size tiles
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    # inverse of window_partition; the original file defined this twice verbatim,
    # so only one copy is kept here
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


class PatchEmbed(nn.Module):
    def __init__(self, embed_dim=50, norm_layer=None):
        super().__init__()
        self.embed_dim = embed_dim
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x

    def flops(self):
        # NOTE: expects self.img_size to be set externally; carried over from the
        # SwinIR PatchEmbed, which takes an img_size argument
        flops = 0
        H, W = self.img_size
        if self.norm is not None:
            flops += H * W * self.embed_dim
        return flops


class PatchUnEmbed(nn.Module):
    def __init__(self, embed_dim=50):
        super().__init__()
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        B, HW, C = x.shape
        x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B Ph*Pw C
        return x

    def flops(self):
        flops = 0
        return flops


if __name__ == '__main__':
    x = torch.randn((1, 50, 170, 170))
    model = SwinT()
    out = model(x)
    ...
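In this snippet window_size does two jobs: BasicLayer.check_image_size pads the input with 'reflect' so that H and W become multiples of window_size, and window_partition / window_reverse split the padded map into non-overlapping window_size x window_size tiles and reassemble them. A small self-contained check of that round trip, reusing the two helpers defined above on a hypothetical toy tensor:

import torch

window_size = 8
x = torch.randn(1, 16, 16, 4)                            # B, H, W, C; H and W are multiples of 8

windows = window_partition(x, window_size)               # (4, 8, 8, 4): four 8x8 tiles
restored = window_reverse(windows, window_size, 16, 16)  # back to (1, 16, 16, 4)
assert torch.equal(x, restored)                          # partition and reverse are exact inverses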


hash_sent.py

Source: hash_sent.py (GitHub)


#!/usr/bin/env python
import hashlib
import math

# This code provides a piecewise hashing of documents, with variables to set the
# window_size for each piece, whether to split on words or characters, whether to
# provide sliding, and how big the sliding_window should be.
#
# Functions
# hashDocument:
#   Parameters:
#     text: text of document to be hashed
#     window_size: default of 25; number of characters or words to hash at a time
#     words: default of False; hash by characters = False; hash by words = True
#     sliding: default of True; slide the window and overlap the hashes, or have no overlap
#     sliding_window: default of 4; ignored if sliding = False; must be less than window_size
#   Description: Hashes a text document piecewise using a window_size. Provide the text of
#   a document and the string of the piecewise hash for that document will be returned.
# splitText:
#   Parameters: same as hashDocument
#   Description: Splits the text into the sizes needed for hashing. Returns a list of
#   strings split based on the parameters provided.

def hashDocument(text, window_size=25, words=False, sliding=True, sliding_window=4):
    # Error if sliding_window is bigger than or equal to window_size and sliding is turned on
    if sliding_window >= window_size and sliding:
        raise ValueError("sliding_window must be smaller than window_size;")
    returnText = ""
    # Check to see if document length is less than window_size and just hash the whole doc if that's the case
    if len(text) < window_size:
        m = hashlib.md5()
        m.update(text.encode("utf-8"))
        return str(m.digest())[2:].strip().strip("'").strip()
    # Split text into distinct blocks for hashing
    textSplit = splitText(text, window_size, words, sliding, sliding_window)
    # MD5-hash each part of the text and return the text as a long string of these hashes
    for item in textSplit:
        m = hashlib.md5()
        m.update(item.encode("utf-8"))
        returnText += str(m.digest())[2:].strip().strip("'").strip()
    return returnText

def splitText(text, window_size, words, sliding, sliding_window):
    textSplit = []
    if words:
        tempSplit = text.split(" ")
        if sliding:
            # Grab each window of window_size words from the text, sliding sliding_window over each iteration
            textSplit = [" ".join(tempSplit[i:i+window_size]) for i in range(0, len(tempSplit), sliding_window) if (i + window_size) < (len(tempSplit) + sliding_window)]
        else:
            # Grab each window of window_size words from the text without overlap
            textSplit = [" ".join(tempSplit[i:i+window_size]) for i in range(0, len(tempSplit), window_size)]
    else:
        if sliding:
            # Grab each window of window_size characters from the text, sliding sliding_window over each iteration
            textSplit = [text[i:i+window_size] for i in range(0, len(text), sliding_window) if (i + window_size) < (len(text) + sliding_window)]
        else:
            # Grab each window of window_size characters from the text without overlap
            textSplit = [text[i:i+window_size] for i in range(0, len(text), window_size)]
            ...
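A short usage sketch (the sample text is hypothetical; hashDocument is the function defined above). With sliding=True the windows overlap, so a local edit to the document only changes the hashes of the windows that cover it, which is what makes piecewise hashing useful for similarity comparison:

# Hypothetical demo input
text = "the quick brown fox jumps over the lazy dog " * 5

# Non-overlapping 25-character pieces
print(hashDocument(text, window_size=25, sliding=False))

# Overlapping word windows: 10 words per hash, advancing 4 words per step
print(hashDocument(text, window_size=10, words=True, sliding=True, sliding_window=4))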


__init__.py

Source: __init__.py (GitHub)


import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import pdb

def gaussian(window_size, sigma):
    gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
    return gauss/gauss.sum()

def create_window(window_size, channel):
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
    return window

def _ssim(img1, img2, window, window_size, channel, size_average=True):
    mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    sigma1_sq = F.conv2d(img1*img1, window, padding=window_size//2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding=window_size//2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding=window_size//2, groups=channel) - mu1_mu2
    C1 = 0.01**2
    C2 = 0.03**2
    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)

class SSIM(torch.nn.Module):
    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        # pdb.set_trace()
        (_, channel, _, _) = img1.size()
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            window = create_window(self.window_size, channel)

            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)

            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)

def ssim(img1, img2, window_size=11, size_average=True):
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)

    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    ...
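A usage sketch for the SSIM module above (the functional ssim wrapper is cut off by the snippet, so the class form is used here; the input tensors are hypothetical). window_size is the side length of the Gaussian kernel, so it controls the size of the local neighborhood over which means and variances are compared:

import torch

# Hypothetical images: batch of 1, 3 channels, 64x64, values in [0, 1]
img1 = torch.rand(1, 3, 64, 64)
img2 = (img1 * 0.9 + 0.1 * torch.rand(1, 3, 64, 64)).clamp(0, 1)

ssim_module = SSIM(window_size=11, size_average=True)
score = ssim_module(img1, img2)  # scalar; close to 1 for near-identical images
loss = 1 - score                 # a common way to use SSIM as a training loss
print(score.item(), loss.item())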


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Airtest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation test minutes FREE!

