How to use the D_mul parameter in fMBT

Best Python code snippet using fMBT_python

conv_module.py

Source: conv_module.py (GitHub)



import math
import torch
import numpy as np
import torch.nn as nn
from .activate import *
from torch.nn import init
from itertools import repeat
import torch.nn.functional as F
try:
    from torch._six import container_abcs  # PyTorch < 1.9
except ImportError:
    import collections.abc as container_abcs  # torch._six was removed in PyTorch 1.9+
from torch._jit_internal import Optional
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import yolov4_config as cfg

norm_name = {"bn": nn.BatchNorm2d}
activate_name = {"relu": nn.ReLU, "leaky": nn.LeakyReLU, "mish": Mish}


class Convolutional(nn.Module):
    def __init__(
        self,
        filters_in,
        filters_out,
        kernel_size,
        stride,
        pad,
        norm=None,
        activate=None,
    ):
        super(Convolutional, self).__init__()
        self.norm = norm
        self.activate = activate
        if cfg.CONV_TYPE["TYPE"] == "DO_CONV":
            self.__conv = DOConv2d(
                in_channels=filters_in,
                out_channels=filters_out,
                kernel_size=kernel_size,
                stride=stride,
                padding=pad,
                bias=not norm,
            )
        else:
            self.__conv = nn.Conv2d(
                in_channels=filters_in,
                out_channels=filters_out,
                kernel_size=kernel_size,
                stride=stride,
                padding=pad,
                bias=not norm,
            )
        if norm:
            assert norm in norm_name.keys()
            if norm == "bn":
                self.__norm = norm_name[norm](num_features=filters_out)
        if activate:
            assert activate in activate_name.keys()
            if activate == "leaky":
                self.__activate = activate_name[activate](
                    negative_slope=0.1, inplace=True
                )
            if activate == "relu":
                self.__activate = activate_name[activate](inplace=True)

    def forward(self, x):
        x = self.__conv(x)
        if self.norm:
            x = self.__norm(x)
        if self.activate:
            x = self.__activate(x)
        return x


class DOConv2d(Module):
    """
    DOConv2d can be used as an alternative for torch.nn.Conv2d.
    The interface is similar to that of Conv2d, with one exception:
    1. D_mul: the depth multiplier for the over-parameterization.
    Note that the groups parameter switches between DO-Conv (groups=1),
    DO-DConv (groups=in_channels), and DO-GConv (otherwise).
    """

    __constants__ = [
        "stride",
        "padding",
        "dilation",
        "groups",
        "padding_mode",
        "output_padding",
        "in_channels",
        "out_channels",
        "kernel_size",
        "D_mul",
    ]
    __annotations__ = {"bias": Optional[torch.Tensor]}

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        D_mul=None,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
    ):
        super(DOConv2d, self).__init__()
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        valid_padding_modes = {"zeros", "reflect", "replicate", "circular"}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".format(
                    valid_padding_modes, padding_mode
                )
            )
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode
        self._padding_repeated_twice = tuple(
            x for x in self.padding for _ in range(2)
        )

        #################### Initialization of D & W ####################
        M = self.kernel_size[0]
        N = self.kernel_size[1]
        self.D_mul = M * N if D_mul is None or M * N <= 1 else D_mul
        self.W = Parameter(
            torch.Tensor(out_channels, in_channels // groups, self.D_mul)
        )
        init.kaiming_uniform_(self.W, a=math.sqrt(5))

        if M * N > 1:
            # D is trained from zero; the frozen identity D_diag is added to it
            # in forward(), so DoW starts out as a plain convolution kernel.
            self.D = Parameter(torch.Tensor(in_channels, M * N, self.D_mul))
            init_zero = np.zeros(
                [in_channels, M * N, self.D_mul], dtype=np.float32
            )
            self.D.data = torch.from_numpy(init_zero)

            eye = torch.reshape(
                torch.eye(M * N, dtype=torch.float32), (1, M * N, M * N)
            )
            D_diag = eye.repeat((in_channels, 1, self.D_mul // (M * N)))
            if self.D_mul % (M * N) != 0:  # the case when D_mul is not a multiple of M * N
                zeros = torch.zeros([in_channels, M * N, self.D_mul % (M * N)])
                self.D_diag = Parameter(
                    torch.cat([D_diag, zeros], dim=2), requires_grad=False
                )
            else:  # the case when D_mul == M * N (or an exact multiple of it)
                self.D_diag = Parameter(D_diag, requires_grad=False)
        ##################################################################

        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.W)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
        else:
            self.register_parameter("bias", None)

    def extra_repr(self):
        s = (
            "{in_channels}, {out_channels}, kernel_size={kernel_size}"
            ", stride={stride}"
        )
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias is None:
            s += ", bias=False"
        if self.padding_mode != "zeros":
            s += ", padding_mode={padding_mode}"
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super(DOConv2d, self).__setstate__(state)
        if not hasattr(self, "padding_mode"):
            self.padding_mode = "zeros"

    def _conv_forward(self, input, weight):
        if self.padding_mode != "zeros":
            return F.conv2d(
                F.pad(
                    input, self._padding_repeated_twice, mode=self.padding_mode
                ),
                weight,
                self.bias,
                self.stride,
                _pair(0),
                self.dilation,
                self.groups,
            )
        return F.conv2d(
            input,
            weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )

    def forward(self, input):
        M = self.kernel_size[0]
        N = self.kernel_size[1]
        DoW_shape = (self.out_channels, self.in_channels // self.groups, M, N)
        if M * N > 1:
            ######################### Compute DoW #########################
            # D has shape (in_channels, M * N, D_mul)
            D = self.D + self.D_diag
            W = torch.reshape(
                self.W,
                (
                    self.out_channels // self.groups,
                    self.in_channels,
                    self.D_mul,
                ),
            )
            # einsum outputs (out_channels // groups, in_channels, M * N),
            # which is reshaped to
            # (out_channels, in_channels // groups, M, N)
            DoW = torch.reshape(torch.einsum("ims,ois->oim", D, W), DoW_shape)
            ###############################################################
        else:
            # in this case D_mul == M * N, so reshape from
            # (out_channels, in_channels // groups, D_mul)
            # to
            # (out_channels, in_channels // groups, M, N)
            DoW = torch.reshape(self.W, DoW_shape)
        return self._conv_forward(input, DoW)


def _ntuple(n):
    def parse(x):
        if isinstance(x, container_abcs.Iterable):
            return x
        return tuple(repeat(x, n))

    return parse


# The snippet is truncated here; _pair is the standard 2-tuple helper
# that the code above relies on.
_pair = _ntuple(2)
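In this snippet, D_mul is a constructor argument of DOConv2d rather than a method: it sets the depth of the over-parameterization, and groups selects between DO-Conv, DO-DConv, and DO-GConv, as the class docstring notes. The sketch below is a minimal, illustrative usage example, assuming DOConv2d and its helpers above are in scope (only the Convolutional wrapper needs the repo-specific .activate and yolov4_config imports); all channel counts and tensor shapes are made up for demonstration.

import torch

# DO-Conv (groups=1): over-parameterize a 3x3 kernel with D_mul = 2 * M * N.
conv = DOConv2d(in_channels=16, out_channels=32, kernel_size=3,
                D_mul=18, stride=1, padding=1)

# DO-DConv (groups == in_channels): the depthwise variant.
dconv = DOConv2d(in_channels=16, out_channels=16, kernel_size=3,
                 groups=16, padding=1)

x = torch.randn(1, 16, 64, 64)
print(conv(x).shape)   # torch.Size([1, 32, 64, 64])
print(dconv(x).shape)  # torch.Size([1, 16, 64, 64])

# The over-parameterization folds away at inference: composing D and W the
# same way forward() does yields a single kernel that a plain conv2d reproduces.
with torch.no_grad():
    M, N = conv.kernel_size
    D = conv.D + conv.D_diag
    W = torch.reshape(conv.W, (conv.out_channels // conv.groups,
                               conv.in_channels, conv.D_mul))
    DoW = torch.reshape(torch.einsum("ims,ois->oim", D, W),
                        (conv.out_channels, conv.in_channels // conv.groups, M, N))
    ref = torch.nn.functional.conv2d(x, DoW, conv.bias, stride=conv.stride,
                                     padding=conv.padding, dilation=conv.dilation,
                                     groups=conv.groups)
    print(torch.allclose(conv(x), ref))  # True

Passing D_mul=None (the default) falls back to D_mul = M * N; values larger than M * N only add trainable capacity during training, since the composed kernel DoW always has the ordinary (out_channels, in_channels // groups, M, N) shape.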


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation testing minutes FREE!

Next-Gen App & Browser Testing Cloud
