How to use the _forward method in fMBT

Best Python code snippets using fMBT_python

variations.py

Source: variations.py (GitHub)


...
    def psi(cls, x):
        return torch.rand_like(x)

    @torch.no_grad()
    def forward(self, points: torch.Tensor):
        v = self._forward(points)
        if isinstance(v, tuple):
            v = torch.stack(list(v), dim=1)
        return v

    def __getstate__(self):
        return dict(
            p=self.p
        )


class Linear(Variation):
    def _forward(self, points):
        return points


class Sinusoidal(Variation):
    def _forward(self, points):
        return torch.sin(points)


class Spherical1(Variation):
    def _forward(self, points):
        return points / self.r(points).unsqueeze(1)


class Spherical2(Variation):
    def _forward(self, points):
        return points / self.r2(points).unsqueeze(1)


class Swirl(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r2 = self.r2(x, y)
        sin_r2 = torch.sin(r2)
        cos_r2 = torch.cos(r2)
        return (
            x * sin_r2 - y * cos_r2,
            x * cos_r2 + y * sin_r2
        )


class Swirl2(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            r * torch.cos(theta + r),
            r * torch.sin(theta + r)
        )


class Horseshoe(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            r * torch.cos(2 * theta),
            r * torch.sin(2 * theta)
        )


class Horseshoe2(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        return (
            (x - y) * (x + y) / r,
            2 * x * y / r
        )


class Polar(Variation):
    def _forward(self, points):
        return (
            self.theta(points) / np.pi,
            self.r(points) - 1
        )


class Handkerchief(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            r * torch.sin(theta + r),
            r * torch.cos(theta - r)
        )


class Heart(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            r * torch.sin(theta * r),
            -r * torch.cos(theta * r)
        )


class Spiral(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            (torch.cos(theta) + torch.sin(r)) / r,
            (torch.sin(theta) - torch.cos(r)) / r
        )


class Hyperbolic(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            torch.sin(theta) / r,
            torch.cos(theta) * r
        )


class Diamond(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            torch.sin(theta) * torch.cos(r),
            torch.cos(theta) * torch.sin(r)
        )


class Ex(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        return (
            r * torch.pow(torch.sin(theta + r), 3),
            r * torch.pow(torch.cos(theta - r), 3)
        )


class Ex2(Variation):
    def _forward(self, points):
        r = self.r(points)
        theta = self.theta(points)
        p0 = torch.sin(theta + r) ** 3
        p1 = torch.cos(theta - r) ** 3
        return (
            r * (p0 + p1),
            r * (p0 - p1)
        )


class Julia(Variation):
    def _forward(self, points):
        sqrt_r = torch.sqrt(self.r(points))
        theta = self.theta(points)
        omega = self.omega(sqrt_r)
        return (
            sqrt_r * torch.cos(theta / 2 + omega),
            sqrt_r * torch.sin(theta / 2 + omega)
        )


class Disc(Variation):
    def _forward(self, points):
        theta_pi = np.pi * self.theta(points)
        r_pi = np.pi * self.r(points)
        return (
            theta_pi * torch.sin(r_pi),
            theta_pi * torch.cos(r_pi)
        )


class Bent(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        return torch.where(x >= 0, x, 2 * x), torch.where(y >= 0, y, y / 2)


# class Waves(Variation):
#     pass


class Fisheye(Variation):
    def _forward(self, points):
        scale = 2 / (self.r(points) + 1)
        return (  # Yes, this ordering is intentional. See Eyefish for the "fixed" version...
            self.y(points) * scale,
            self.x(points) * scale
        )


# class Popcorn(Variation):
#     pass


class Exponential(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        scale = torch.exp(x - 1)
        return (
            scale * torch.cos(np.pi * y),
            scale * torch.sin(np.pi * y)
        )


class Power(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        theta = self.theta(x, y)
        scale = r ** torch.sin(theta)
        return (
            scale * torch.cos(theta),
            scale * torch.sin(theta)
        )


class Cosine(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        theta = self.theta(x, y)
        return (
            torch.cos(np.pi * x) * torch.cosh(y),
            -torch.sin(np.pi * x) * torch.sinh(y)
        )


# class Rings(Variation):
#     def _forward(self, points):
#         x = self.x(points)
#         y = self.y(points)
#         r = self.r(x, y)
#         theta = self.theta(x, y)
#         return (
#
#         )


# class Fan(Variation):
#     def _forward(self, points):
#         x = self.x(points)
#         y = self.y(points)
#         r = self.r(x, y)
#         theta = self.theta(x, y)
#         return (
#
#         )


class Blob(Variation):
    num_p = 3

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        theta = self.theta(x, y)
        high, low, waves = points.new_tensor(self.p)
        scale = r * (low + (high - low) / 2 * (torch.sin(waves * theta) + 1))
        return (
            scale * torch.cos(theta),
            scale * torch.sin(theta)
        )


class PDJ(Variation):
    num_p = 4

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        return (
            torch.sin(self.p[0] * y) - torch.cos(self.p[1] * x),
            torch.sin(self.p[2] * x) - torch.cos(self.p[3] * y)
        )


class Fan2(Variation):
    num_p = 2

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        theta = self.theta(x, y)
        j = np.pi * self.p[0] ** 2
        t = theta + self.p[1] - j * torch.trunc(2 * theta * self.p[1] / j)
        condition = t > j / 2
        theta_plus = theta + j / 2
        theta_minus = theta - j / 2
        return (
            r * torch.where(condition, torch.sin(theta_minus), torch.sin(theta_plus)),
            r * torch.where(condition, torch.cos(theta_minus), torch.cos(theta_plus))
        )


class Rings2(Variation):
    num_p = 1

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        theta = self.theta(x, y)
        p = self.p[0] ** 2
        t = r - 2 * p * torch.trunc((r + p) / (2 * p)) + r * (1 - p)
        return (
            t * torch.sin(theta),
            t * torch.cos(theta)
        )


class Eyefish(Variation):
    def _forward(self, points):
        scale = 2 / (self.r(points) + 1)
        return scale.unsqueeze(1) * points


class Bubble(Variation):
    def _forward(self, points):
        scale = 4 / (self.r2(points) + 4)
        return scale.unsqueeze(1) * points


class Cylinder(Variation):
    def _forward(self, points):
        return (
            torch.sin(self.x(points)),
            self.y(points)
        )


class Perspective(Variation):
    num_p = 2

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        angle, dist = points.new_tensor(self.p)
        scale = dist / (dist - y * torch.sin(angle))
        return (
            scale * x,
            scale * y * torch.cos(angle)
        )


class Noise(Variation):
    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r1 = self.psi(x)
        r2 = self.psi(x)
        return (
            r1 * x * torch.cos(2 * np.pi * r2),
            r1 * y * torch.sin(2 * np.pi * r2)
        )


class JuliaN(Variation):
    num_p = 2

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        power, dist = points.new_tensor(self.p)
        p3 = torch.trunc(torch.abs(power) * self.psi(x))
        t = (self.phi(points) + 2 * np.pi * p3) / power
        scale = r ** (dist / power)
        return (
            scale * torch.cos(t),
            scale * torch.sin(t)
        )


class JuliaScope(Variation):
    num_p = 2

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        r = self.r(x, y)
        power, dist = points.new_tensor(self.p)
        p3 = torch.trunc(torch.abs(power) * self.psi(x))
        t = (self.lambda_(x) * self.phi(points) + 2 * np.pi * p3) / power
        scale = r ** (dist / power)
        return (
            scale * torch.cos(t),
            scale * torch.sin(t)
        )


class Blur(Variation):
    def _forward(self, points):
        x = self.x(points)
        r1 = self.psi(x)
        r2 = self.psi(x)
        return (
            r1 * torch.cos(2 * np.pi * r2),
            r1 * torch.sin(2 * np.pi * r2)
        )


class Gaussian(Variation):
    # I deviate slightly here; they do a gaussian approximation, I'll just use randn
    def _forward(self, points):
        x = self.x(points)
        r1 = torch.randn_like(x)
        r2 = self.psi(x)
        return (
            r1 * torch.cos(2 * np.pi * r2),
            r1 * torch.sin(2 * np.pi * r2)
        )


# class RadialBlur(Variation):
#     def _forward(self, points):
#         x = self.x(points)
#         y = self.y(points)
#         r = self.r(x, y)
#         theta = self.theta(x, y)
#         return (
#
#         )


class Pie(Variation):
    num_p = 3

    def _forward(self, points):
        x = self.x(points)
        y = self.y(points)
        t1 = torch.trunc(self.psi(x) * self.p[0] + 0.5)
        t2 = self.p[1] + 2 * np.pi / self.p[0] * (t1 + self.psi(x) * self.p[2])
        scale = self.psi(x)
        return (
            scale * torch.cos(t2),
            scale * torch.sin(t2)
        )


# class NAME(Variation):
#     def _forward(self, points):
#         x = self.x(points)
#         y = self.y(points)
#         r = self.r(x, y)
#         theta = self.theta(x, y)
#         return (
#
#         )
#
# (The source file repeats this commented-out "class NAME(Variation)" placeholder template
# several more times as stubs for variations that have not been implemented yet.)


if __name__ == '__main__':
    from . import grid_plot
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('variation', choices=list(sorted(Variation.all_variations.keys())))
    args = parser.parse_args()
    variation = Variation.all_variations[args.variation]()
    ...
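
The pattern that ties all of these classes together is worth spelling out: every variation implements only _forward, while the shared forward wrapper shown at the top runs it under torch.no_grad() and stacks a returned (x, y) tuple back into an (N, 2) tensor. Below is a minimal, self-contained sketch of that dispatch; the cut-down base class and the inline r/theta math are stand-ins for the helpers (self.r, self.theta, self.x, self.y, self.p) that live in the truncated part of the original file, so names and conventions here are assumptions for illustration only.

import torch

class Variation:
    """Minimal stand-in for the base class above (hypothetical, for illustration only)."""

    @torch.no_grad()
    def forward(self, points: torch.Tensor):
        # Subclasses implement _forward; a tuple of (x, y) components is re-stacked
        # into a single (N, 2) tensor, mirroring the wrapper in the snippet above.
        v = self._forward(points)
        if isinstance(v, tuple):
            v = torch.stack(list(v), dim=1)
        return v

    def _forward(self, points):
        raise NotImplementedError

class Sinusoidal(Variation):
    def _forward(self, points):
        return torch.sin(points)

class Swirl2(Variation):
    def _forward(self, points):
        # Inline polar helpers; the original file's self.r/self.theta may use a
        # different angle convention.
        x, y = points[:, 0], points[:, 1]
        r = torch.sqrt(x ** 2 + y ** 2)
        theta = torch.atan2(y, x)
        return r * torch.cos(theta + r), r * torch.sin(theta + r)

points = torch.rand(5, 2) * 2 - 1           # 5 random points in [-1, 1]^2
print(Sinusoidal().forward(points).shape)   # torch.Size([5, 2])
print(Swirl2().forward(points).shape)       # torch.Size([5, 2])

Because forward never touches the coordinate components directly, adding a new variation comes down to subclassing and writing a _forward.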


start_points.py

Source: start_points.py (GitHub)


...
        self.linear = nn.Linear(1024, vocab_size)
        self.device = device

    def forward(self, input, gts=None):
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        cnn_output = self.cnn(input)
        _, hidden = self.encoder(cnn_output)  # width, batch, alphabet
        _, b, _ = hidden[0].shape
        outputs = []
        output = torch.zeros((1, b, 1024)).to(self.device)
        for i in range(MAX_LENGTH):
            output, hidden = self.decoder(output, hidden)
            # output = nn.functional.relu(output)
            outputs.append(self.linear(output))
        # sigmoids are done in the loss
        outputs = torch.cat(outputs, dim=0)
        return outputs


class StartPointModel2(nn.Module):
    def __init__(self, vocab_size=3, device="cuda", cnn_type="default", first_conv_op=CoordConv, first_conv_opts=None, **kwargs):
        super().__init__()
        self.__dict__.update(kwargs)
        if first_conv_op:
            first_conv_op = CoordConv
        self.decoder_size = 256
        self.cnn = CNN(nc=1, first_conv_op=first_conv_op, cnn_type=cnn_type, first_conv_opts=first_conv_opts)
        self.encoder = nn.LSTM(input_size=1024, hidden_size=self.decoder_size, bidirectional=True, dropout=.5, num_layers=1)
        self.decoder = nn.LSTM(input_size=self.decoder_size, hidden_size=self.decoder_size, num_layers=2, dropout=.5)
        self.linear = nn.Linear(self.decoder_size, vocab_size)
        self.device = device

    def forward(self, input, gts=None):
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        cnn_output = self.cnn(input)
        _, hidden = self.encoder(cnn_output)  # width, batch, alphabet
        _, b, _ = hidden[0].shape
        outputs = []
        output = torch.zeros((1, b, self.decoder_size)).to(self.device)
        for i in range(MAX_LENGTH):
            output, hidden = self.decoder(output, hidden)
            # output = nn.functional.relu(output)
            outputs.append(self.linear(output))
        # sigmoids are done in the loss
        outputs = torch.cat(outputs, dim=0)
        return outputs


class StartPointAttnModel(nn.Module):
    def __init__(self, vocab_size=3, device="cuda", cnn_type="default", first_conv_op=CoordConv, first_conv_opts=None, **kwargs):
        super().__init__()
        self.__dict__.update(kwargs)
        if first_conv_op:
            first_conv_op = CoordConv
        self.cnn = CNN(nc=1, first_conv_op=first_conv_op, cnn_type=cnn_type, first_conv_opts=first_conv_opts)
        self.encoder = nn.LSTM(input_size=1024, hidden_size=256)
        self.attn = nn.MultiheadAttention(embed_dim=256, num_heads=1)
        self.decoder = nn.LSTM(input_size=512, hidden_size=256, num_layers=1)
        self.linear = nn.Linear(256, vocab_size)
        self.device = device

    def forward(self, input, gts=None):
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        cnn_output = self.cnn(input)
        encoding, hidden = self.encoder(cnn_output)  # width, batch, alphabet
        _, b, _ = hidden[0].shape
        outputs = []
        output = torch.zeros((1, b, 256)).to(self.device)
        for i in range(MAX_LENGTH):
            context, _ = self.attn(output, encoding, encoding)
            output, hidden = self.decoder(torch.cat([output, context], dim=-1), hidden)
            output = nn.functional.relu(output)
            outputs.append(self.linear(output))
        # sigmoids are done in the loss
        outputs = torch.cat(outputs, dim=0)
        return outputs


class StartPointAttnModelDeep(nn.Module):
    def __init__(self, vocab_size=3, device="cuda", cnn_type="default", first_conv_op=CoordConv, first_conv_opts=None, **kwargs):
        super().__init__()
        self.__dict__.update(kwargs)
        if first_conv_op:
            first_conv_op = CoordConv
        self.decoder_layers = 2
        self.encoder_layers = 2
        self.context_dim = 128
        self.vocab_size = vocab_size
        self.decoder_dim = 128
        self.cnn = CNN(nc=1, first_conv_op=first_conv_op, cnn_type=cnn_type, first_conv_opts=first_conv_opts)
        self.encoder = nn.LSTM(input_size=1024, hidden_size=128, num_layers=self.encoder_layers)
        self.attn = nn.MultiheadAttention(embed_dim=128, num_heads=4)
        self.decoder = nn.LSTM(input_size=self.context_dim + self.decoder_dim, hidden_size=self.decoder_dim, num_layers=self.decoder_layers)
        self.linear = nn.Linear(self.decoder_dim, vocab_size)
        self.device = device

    def forward(self, input, gts=None):
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        cnn_output = self.cnn(input)
        encoding, hidden = self.encoder(cnn_output)  # width, batch, alphabet
        _, b, _ = hidden[0].shape
        outputs = []
        output = torch.zeros((1, b, self.decoder_dim)).to(self.device)
        hidden = 2 * [torch.zeros((2, b, self.decoder_dim)).to(self.device)]
        for i in range(MAX_LENGTH):
            context, _ = self.attn(output, encoding, encoding)
            output, hidden = self.decoder(torch.cat([output, context], dim=-1), hidden)
            output = nn.functional.relu(output)
            outputs.append(self.linear(output))
        # sigmoids are done in the loss
        outputs = torch.cat(outputs, dim=0)
        return outputs


class StartPointAttnModelFull(nn.Module):
    def __init__(self, vocab_size=4, device="cuda", cnn_type="default", first_conv_op=CoordConv, first_conv_opts=None, **kwargs):
        super().__init__()
        self.__dict__.update(kwargs)
        if first_conv_op:
            first_conv_op = CoordConv
        self.decoder_layers = 2
        self.encoder_layers = 2
        self.context_dim = 128
        self.vocab_size = vocab_size
        self.decoder_dim = 128
        self.cnn = CNN(nc=1, first_conv_op=first_conv_op, cnn_type=cnn_type, first_conv_opts=first_conv_opts)
        self.encoder = nn.LSTM(input_size=1024, hidden_size=128, num_layers=self.encoder_layers)
        self.attn = nn.MultiheadAttention(embed_dim=128, num_heads=4)
        self.decoder = nn.LSTM(input_size=self.context_dim + vocab_size, hidden_size=self.decoder_dim, num_layers=self.decoder_layers)
        self.linear = nn.Linear(self.context_dim + self.decoder_dim, vocab_size)
        self.device = device

    def forward(self, input, gts=None):
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        print(input.shape)
        cnn_output = self.cnn(input)
        encoding, hidden = self.encoder(cnn_output)  # width, batch, alphabet
        # assert encoding[-1,-1,-1] == hidden[0][-1,-1,-1]
        # https://stackoverflow.com/questions/48302810/whats-the-difference-between-hidden-and-output-in-pytorch-lstm
        _, b, _ = encoding.shape
        outputs = []
        hidden = [torch.zeros(2, b, self.decoder_dim).to(self.device)] * self.decoder_layers
        y = torch.zeros((1, b, self.vocab_size)).to(self.device)
        c_t = torch.zeros((1, b, self.context_dim)).to(self.device)
        for i in range(MAX_LENGTH):
            s1, hidden = self.decoder(torch.cat([y, c_t], dim=-1), hidden)
            c_t, _ = self.attn(s1, encoding, encoding)
            y = self.linear(torch.cat([s1, c_t], dim=-1))
    ...
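
All of the StartPoint models above share the same dispatch idiom: the public forward checks self.training and either calls _forward directly or wraps it in torch.no_grad(), so evaluation never builds an autograd graph. The sketch below isolates just that idiom; the TrainEvalDispatch class and its single linear layer are hypothetical stand-ins for the CNN/LSTM internals of the real models.

import torch
import torch.nn as nn

class TrainEvalDispatch(nn.Module):
    """Hypothetical minimal module showing the forward/_forward split used above."""

    def __init__(self, in_dim=8, out_dim=3):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, input, gts=None):
        # Same branching as the StartPoint models: gradients only in training mode.
        if self.training:
            return self._forward(input, gts)
        else:
            with torch.no_grad():
                return self._forward(input, gts)

    def _forward(self, input, gts=None):
        return self.linear(input)

model = TrainEvalDispatch()
x = torch.randn(4, 8)

model.train()
print(model(x).requires_grad)   # True: a graph is built for backprop

model.eval()
print(model(x).requires_grad)   # False: the call ran under torch.no_grad()

Switching between model.train() and model.eval() is all it takes to toggle the behaviour, since nn.Module sets self.training accordingly.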


forwarding_canvas.py

Source: forwarding_canvas.py (GitHub)


from .canvas import Canvas


def _forward(method):
    def func(self, *args, **kwargs):
        # pylint: disable=W0212
        return getattr(self._canvas, method)(*args, **kwargs)
    func.name = method
    return func


class ForwardingCanvas(Canvas):
    """
    Canvas that dispatches all calls to a contained canvas
    """
    def __init__(self, canvas):
        super().__init__(canvas.width, canvas.height)
        self._canvas = canvas

    @property
    def turtle(self):
        return self._canvas.turtle

    @turtle.setter
    def turtle(self, turtle):
        self._canvas.turtle = turtle

    def set_canvas(self, canvas):
        canvas.turtle = self._canvas.turtle
        self._canvas = canvas
        self.width = canvas.width
        self.height = canvas.height

    draw_rectangular_line = _forward("draw_rectangular_line")
    draw_circle = _forward("draw_circle")
    fill_path = _forward("fill_path")
    axis_aligned_rectangle = _forward("axis_aligned_rectangle")
    set_bgcolor = _forward("set_bgcolor")
    clear = _forward("clear")
    refreshed_turtle = _forward("refreshed_turtle")
    set_speed = _forward("set_speed")
    ...
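
Note that in this file _forward is not a method at all but a module-level factory: it returns a closure that looks up the same-named method on the wrapped self._canvas and calls it, so each forwarded method becomes a one-line class attribute. The sketch below shows the same delegation pattern without the Canvas dependency; Engine and ForwardingEngine are hypothetical names used only for illustration.

def _forward(method):
    """Return a function that delegates `method` to the wrapped self._target."""
    def func(self, *args, **kwargs):
        return getattr(self._target, method)(*args, **kwargs)
    func.__name__ = method   # give the generated function a readable name
    return func

class Engine:
    # Toy stand-in for the wrapped object (hypothetical).
    def start(self):
        return "engine started"

    def stop(self):
        return "engine stopped"

class ForwardingEngine:
    """Dispatches the listed calls to a contained Engine, like ForwardingCanvas does for Canvas."""

    def __init__(self, target):
        self._target = target

    # Each forwarded method is a one-line class attribute, as in ForwardingCanvas.
    start = _forward("start")
    stop = _forward("stop")

wrapper = ForwardingEngine(Engine())
print(wrapper.start())   # engine started
print(wrapper.stop())    # engine stopped

This keeps the forwarding class small as the wrapped object grows: adding another forwarded call is a single assignment rather than a hand-written wrapper method.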


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!

