How to use the init_a method in hypothesis

Best Python code snippets using hypothesis
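
The snippets below each define or use a name `init_a` in their own code; none of them calls the Hypothesis library directly. To exercise code like this with Hypothesis, the usual pattern is a `@given` property test. A minimal sketch (the test function and the property it checks are illustrative, not taken from the snippets):

from hypothesis import given, strategies as st

@given(st.lists(st.booleans(), min_size=1))
def test_set_bit_count(bits):
    # The init_a bit arrays below rely on this kind of invariant:
    # the number of set positions equals the sum of the flags.
    assert bits.count(True) == sum(bits)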

lopt.py

Source: lopt.py (GitHub)


from multiprocessing import Pool
from indigox.config import INIT_WITH_GA, NUM_PROCESSES
from indigox.exception import IndigoUnfeasibleComputation
from indigox.misc import (BondOrderAssignment, graph_to_dist_graph, electron_spots,
                          electrons_to_add, locs_sort, HashBitArray, graph_setup,
                          node_energy, bitarray_to_assignment)


class LocalOptimisation(BondOrderAssignment):
    def __init__(self, G):
        self.init_G = G

    def initialise(self):
        self.G = graph_to_dist_graph(self.init_G)
        self.target = electrons_to_add(self.init_G)
        self.locs = locs_sort(electron_spots(self.init_G), self.G)
        if INIT_WITH_GA:
            # Greedy build-up: activate one location at a time, always the one
            # whose activation raises the total energy the least.
            self.init_a = HashBitArray(len(self.locs))
            self.init_a.setall(False)
            base_energy = self.calc_energy(self.init_a)[1]
            all_locs = list(range(len(self.locs)))
            while self.init_a.count() < self.target:
                energy_diffs = {}
                for i in all_locs:
                    self.init_a[i] = True
                    energy_diffs[i] = (self.calc_energy(self.init_a)[1]
                                       - base_energy)
                    self.init_a[i] = False
                min_i = min(energy_diffs, key=lambda x: energy_diffs[x])
                self.init_a[min_i] = True
                base_energy += energy_diffs[min_i]
                all_locs.remove(min_i)
        else:
            # Trivial initial guess: fill the first `target` locations.
            self.init_a = HashBitArray(len(self.locs))
            self.init_a.setall(False)
            self.init_a[:self.target] = True
        if self.init_a.count() != self.target:
            raise IndigoUnfeasibleComputation('Can only optimise when all '
                                              'electrons are placed in the initial guess.')

    def run(self):
        self.initialise()
        min_ene = self.calc_energy(self.init_a)[1]
        seen = {self.init_a: min_ene}
        current_min = [self.init_a]
        min_round = min_ene + 1      # So the while loop is entered,
        round_mins = current_min[:]  # regardless of min_ene value.
        pool = Pool(processes=NUM_PROCESSES)
        while abs(min_round - min_ene) > 1e-10:
            min_ene = min_round
            current_min = round_mins[:]
            a = current_min[0]
            results = pool.imap_unordered(self.calc_energy,
                                          (n for n in self.neighbours(a)
                                           if n not in seen),
                                          chunksize=8)
            for n, n_ene in results:
                seen[n] = n_ene
                if n_ene - min_round < -1e-10:
                    min_round = n_ene
                    round_mins = [n]
                elif -1e-10 < n_ene - min_round < 1e-10:
                    round_mins.append(n)
        pool.terminate()
        bitarray_to_assignment(self.init_G, current_min[0], self.locs)
        return self.init_G, seen[current_min[0]]

    def calc_energy(self, a):
        graph_setup(self.G, a, self.locs)
        ene = sum(node_energy(self.G, n) for n in self.G)
        return a, round(ene, 5)

    def neighbours(self, a):
        # Yield every assignment reachable by moving one electron from an
        # occupied location to a different, not-yet-full location.
        for source in set(self.locs):
            i = self.locs.index(source)
            i_count = self.locs.count(source)
            i_loc = i + a[i:i + i_count].count() - 1
            if not a[i:i + i_count].count():
                continue
            for target in set(self.locs):
                if source == target:
                    continue
                j = self.locs.index(target)
                j_count = self.locs.count(target)
                j_loc = j + a[j:j + j_count].count()
                if j_count == a[j:j + j_count].count():
                    continue
                b = a.copy()
                b[i_loc] = False
                b[j_loc] = True
                yield b
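
In the INIT_WITH_GA branch, initialise() builds init_a greedily: starting from an all-False bit array, it repeatedly switches on the single location whose activation raises the energy the least, until `target` bits are set. A self-contained sketch of the same pattern using plain lists and a toy energy function (the names and the energy model here are illustrative, not indigox's):

def greedy_init(n_locs, target, energy):
    """Greedily set `target` bits, at each step choosing the index
    whose activation gives the smallest energy increase."""
    a = [False] * n_locs
    base = energy(a)
    remaining = list(range(n_locs))
    while sum(a) < target:
        diffs = {}
        for i in remaining:
            a[i] = True
            diffs[i] = energy(a) - base
            a[i] = False
        best = min(diffs, key=diffs.get)
        a[best] = True
        base += diffs[best]
        remaining.remove(best)
    return a

# Toy energy: penalise adjacent set bits, so the greedy pass spreads them out.
toy = lambda a: sum(1 for x, y in zip(a, a[1:]) if x and y)
print(greedy_init(8, 4, toy))  # e.g. [True, False, True, False, True, False, True, False]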

dyrelu.py

Source: dyrelu.py (GitHub)


import torch
import torch.nn as nn
import torch.nn.functional as F


def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class swish(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)


class h_swish(nn.Module):
    def __init__(self, inplace=False):
        super(h_swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0


class h_sigmoid(nn.Module):
    def __init__(self, inplace=True, h_max=1):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)
        self.h_max = h_max

    def forward(self, x):
        return self.relu(x + 3) * self.h_max / 6


class DYReLU(nn.Module):
    def __init__(self, inp, oup, reduction=4, lambda_a=1.0, K2=True, use_bias=True,
                 use_spatial=False, init_a=[1.0, 0.0], init_b=[0.0, 0.0]):
        super(DYReLU, self).__init__()
        self.oup = oup
        self.lambda_a = lambda_a * 2
        self.K2 = K2
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.use_bias = use_bias
        if K2:
            self.exp = 4 if use_bias else 2
        else:
            self.exp = 2 if use_bias else 1
        self.init_a = init_a
        self.init_b = init_b
        # determine squeeze
        if reduction == 4:
            squeeze = inp // reduction
        else:
            squeeze = _make_divisible(inp // reduction, 4)
        # print('reduction: {}, squeeze: {}/{}'.format(reduction, inp, squeeze))
        # print('init_a: {}, init_b: {}'.format(self.init_a, self.init_b))
        self.fc = nn.Sequential(
            nn.Linear(inp, squeeze),
            nn.ReLU(inplace=True),
            nn.Linear(squeeze, oup * self.exp),
            h_sigmoid()
        )
        if use_spatial:
            self.spa = nn.Sequential(
                nn.Conv2d(inp, 1, kernel_size=1),
                nn.BatchNorm2d(1),
            )
        else:
            self.spa = None

    def forward(self, x):
        if isinstance(x, list):
            x_in = x[0]
            x_out = x[1]
        else:
            x_in = x
            x_out = x
        b, c, h, w = x_in.size()
        y = self.avg_pool(x_in).view(b, c)
        y = self.fc(y).view(b, self.oup * self.exp, 1, 1)
        if self.exp == 4:
            a1, b1, a2, b2 = torch.split(y, self.oup, dim=1)
            a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0]  # 1.0
            a2 = (a2 - 0.5) * self.lambda_a + self.init_a[1]
            b1 = b1 - 0.5 + self.init_b[0]
            b2 = b2 - 0.5 + self.init_b[1]
            out = torch.max(x_out * a1 + b1, x_out * a2 + b2)
        elif self.exp == 2:
            if self.use_bias:  # bias but not PL
                a1, b1 = torch.split(y, self.oup, dim=1)
                a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0]  # 1.0
                b1 = b1 - 0.5 + self.init_b[0]
                out = x_out * a1 + b1
            else:
                a1, a2 = torch.split(y, self.oup, dim=1)
                a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0]  # 1.0
                a2 = (a2 - 0.5) * self.lambda_a + self.init_a[1]
                out = torch.max(x_out * a1, x_out * a2)
        elif self.exp == 1:
            a1 = y
            a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0]  # 1.0
            out = x_out * a1
        if self.spa:
            ys = self.spa(x_in).view(b, -1)
            ys = F.softmax(ys, dim=1).view(b, 1, h, w) * h * w
            ys = F.hardtanh(ys, 0, 3, inplace=True) / 3
            out = out * ys
        return out
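
Here init_a sets where the learned slopes start: with the default init_a=[1.0, 0.0] the two branch coefficients are centred at 1 and 0, so a freshly initialised DYReLU behaves roughly like a plain ReLU, max(x * 1, x * 0). A quick smoke test of the class as defined above (batch and spatial sizes are arbitrary):

m = DYReLU(inp=64, oup=64)    # defaults: K2=True, use_bias=True, so self.exp == 4
x = torch.randn(2, 64, 8, 8)  # (batch, channels, height, width)
out = m(x)
print(out.shape)              # torch.Size([2, 64, 8, 8])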

analyse_adder_heuristic_design_lsb.py

Source: analyse_adder_heuristic_design_lsb.py (GitHub)


# Analyse heuristic design for adders
# ===================================
#
# Author: Petr Dvoracek <xdvora0n@stud.fit.vutbr.cz>
# Date: 24th October 2014
# Filename: analyse_adder_heuristic_design.py
# Description: The script analyses the error bits when we use smaller adders
#              as a seed for a bigger adder. The output of this script is a table.
#
# NOTE: This is Python 2 code (xrange, print statement).
# TODO: check from odd to even  (translated from Czech)
import sys

MIN_OPERAND_SIZE = 3
MAX_OPERAND_SIZE = 8


def compute_error(init_a, init_b):
    error = 0
    shift = init_a
    sad = 0
    maximal = (2**(init_a + init_b + 1) * 2**(init_b + 1))
    # We do not have to recompute half of the old cases; they are the same as last time.
    for a in xrange(2**init_a):
        for b in xrange(2**init_b):
            error += bin(((a + b) << 1) ^ (b + ((a << 1) | 1))).count('1')
            sad += abs(((a + b) << 1) - (b + ((a << 1) | 1)))
            # print bin(a) + " " + bin(b) + " => " + bin(a + b) + " || " + bin(a | 1 << shift) + " " + bin(b + (a | 1 << shift)) + " XORED " + bin((a + b) ^ (b + (a | 1 << shift)))
    max_fitness = (init_a + 2) * 2**(init_a + init_b + 1)
    init_fitness = max_fitness - error
    error_rate = float(init_fitness) / float(max_fitness)
    result = str(init_a) + "+" + str(init_b) + " => "
    if init_a == init_b:
        result += str(init_a) + "+" + str(init_b + 1) + " "
    else:
        result += str(init_a + 1) + "+" + str(init_b) + " "
    result += (str(max_fitness) + " " + str(error) + " " + str(init_fitness) + " "
               + str(error_rate) + " SAD [" + str(sad) + "] -> "
               + str(float(sad) / float(maximal)))
    return result


for operand_bits in xrange(MIN_OPERAND_SIZE, MAX_OPERAND_SIZE):
    print compute_error(operand_bits, operand_bits)
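
The inner loop compares the expected output, (a + b) << 1, against the output produced when the seed adder's appended low bit is forced to 1, b + ((a << 1) | 1). XOR-ing the two sums and counting set bits gives the number of wrong output bits, while SAD accumulates the absolute numeric difference. A worked example for a single operand pair (values chosen for illustration):

a, b = 5, 3
expected = (a + b) << 1       # 16 -> 0b10000
actual = b + ((a << 1) | 1)   # 3 + 0b1011 = 14 -> 0b01110
print(bin(expected ^ actual).count('1'))  # 4 output bits differ
print(abs(expected - actual))             # contributes 2 to SAD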

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud
