How to use the function1 method in tempest

Best Python code snippets using tempest_python. The snippets below show how callables named function1 are defined and used in different Python projects.

library_functions.py

Source: library_functions.py (GitHub)

import torch
import torch.nn as nn
from .neural_functions import init_neural_function, HeuristicNeuralFunction

# TODO allow user to choose device
if torch.cuda.is_available():
    device = 'cuda:0'
else:
    device = 'cpu'

import pdb


class LibraryFunction:

    def __init__(self, submodules, input_type, output_type, input_size, output_size, num_units, name="", has_params=False,
                 sub_grammar_symbols=None):
        self.submodules = submodules
        self.input_type = input_type
        self.output_type = output_type
        self.input_size = input_size
        self.output_size = output_size
        self.num_units = num_units
        self.name = name
        self.has_params = has_params
        if self.has_params:
            assert "init_params" in dir(self)
            self.init_params()
        self.sub_grammar_symbols = sub_grammar_symbols

    def get_submodules(self):
        return self.submodules

    def set_submodules(self, new_submodules):
        self.submodules = new_submodules

    def get_typesignature(self):
        return self.input_type, self.output_type

    def get_sub_grammar_symbols(self):
        return self.sub_grammar_symbols


class StartFunction(LibraryFunction):

    def __init__(self, input_type, output_type, input_size, output_size, num_units, root_symbol=None):
        self.program = init_neural_function(input_type, output_type, input_size, output_size, num_units)
        submodules = {'program': self.program}
        grammar_symbols = {'program': root_symbol}
        super().__init__(submodules, input_type, output_type, input_size, output_size, num_units, name="Start",
                         sub_grammar_symbols=grammar_symbols)

    def execute_on_batch(self, batch, batch_lens=None, batch_output=None, is_sequential=False):
        return self.submodules["program"].execute_on_batch(batch, batch_lens)


class FoldFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, fold_function=None):
        # TODO: will this accumulator require a grad?
        self.accumulator = torch.zeros(output_size)
        if fold_function is None:
            fold_function = init_neural_function("atom", "atom", input_size+output_size, output_size, num_units)
        submodules = {"foldfunction": fold_function}
        super().__init__(submodules, "list", "atom", input_size, output_size, num_units, name="Fold")

    # ask edge to do the iterative part
    def execute_on_batch(self, batch, batch_lens=None, is_sequential=False):
        assert len(batch.size()) == 3
        prog = self.submodules["foldfunction"]
        # call self
        if issubclass(type(prog), HeuristicNeuralFunction) or issubclass(type(prog), LibraryFunction):
            fold_out = self.execute_self(batch)
        # edge to solve
        else:
            fold_out = prog.execute_on_batch(batch, isfold=True, foldaccumulator=self.accumulator)
        # sequential
        if not is_sequential:
            idx = torch.tensor(batch_lens).to(device) - 1
            idx = idx.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, fold_out.size(-1))
            fold_out = fold_out.gather(1, idx).squeeze(1)
        return fold_out

    # if submodule is not edge
    def execute_self(self, batch):
        assert len(batch.size()) == 3
        batch_size, seq_len, feature_dim = batch.size()
        batch = batch.transpose(0, 1)  # (seq_len, batch_size, feature_dim)
        fold_out = []
        folded_val = self.accumulator.clone().detach().requires_grad_(True)
        folded_val = folded_val.unsqueeze(0).repeat(batch_size, 1).to(device)
        for t in range(seq_len):
            features = batch[t]
            out_val = self.submodules["foldfunction"].execute_on_batch(torch.cat([features, folded_val], dim=1))
            fold_out.append(out_val.unsqueeze(1))
            folded_val = out_val
        fold_out = torch.cat(fold_out, dim=1)
        return fold_out


class MapFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, map_function=None):
        if map_function is None:
            map_function = init_neural_function("atom", "atom", input_size, output_size, num_units)
        submodules = {"mapfunction": map_function}
        super().__init__(submodules, "list", "list", input_size, output_size, num_units, name="Map")

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 3
        batch_size, seq_len, feature_dim = batch.size()
        map_input = batch.view(-1, feature_dim)
        map_output = self.submodules["mapfunction"].execute_on_batch(map_input)
        return map_output.view(batch_size, seq_len, -1)


class MapPrefixesFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, map_function=None):
        if map_function is None:
            map_function = init_neural_function("list", "atom", input_size, output_size, num_units)
        submodules = {"mapfunction": map_function}
        super().__init__(submodules, "list", "list", input_size, output_size, num_units, name="MapPrefixes")

    def execute_on_batch(self, batch, batch_lens):
        assert len(batch.size()) == 3
        map_output = self.submodules["mapfunction"].execute_on_batch(batch, batch_lens, is_sequential=True)
        assert len(map_output.size()) == 3
        return map_output


class ITE(LibraryFunction):
    """(Smoothed) If-Then-Else."""

    def __init__(self, input_type, output_type, input_size, output_size, num_units, eval_function=None,
                 function1=None, function2=None, beta=1.0, name="ITE", simple=False):
        if eval_function is None:
            if simple:
                eval_function = init_neural_function(input_type, "atom", input_size, 1, num_units)
            else:
                eval_function = init_neural_function(input_type, "atom", input_size, output_size, num_units)
        if function1 is None:
            function1 = init_neural_function(input_type, output_type, input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function(input_type, output_type, input_size, output_size, num_units)
        submodules = {"evalfunction": eval_function, "function1": function1, "function2": function2}
        self.bsmooth = nn.Sigmoid()
        self.beta = beta
        self.simple = simple  # the simple version of ITE evaluates the same function for all dimensions of the output
        super().__init__(submodules, input_type, output_type, input_size, output_size, num_units, name=name)

    def execute_on_batch(self, batch, batch_lens=None, is_sequential=False):
        if self.input_type == 'list':
            assert len(batch.size()) == 3
            assert batch_lens is not None
        else:
            assert len(batch.size()) == 2
        if is_sequential:
            predicted_eval = self.submodules["evalfunction"].execute_on_batch(batch, batch_lens, is_sequential=False)
            predicted_function1 = self.submodules["function1"].execute_on_batch(batch, batch_lens, is_sequential=is_sequential)
            predicted_function2 = self.submodules["function2"].execute_on_batch(batch, batch_lens, is_sequential=is_sequential)
        else:
            predicted_eval = self.submodules["evalfunction"].execute_on_batch(batch, batch_lens)
            predicted_function1 = self.submodules["function1"].execute_on_batch(batch, batch_lens)
            predicted_function2 = self.submodules["function2"].execute_on_batch(batch, batch_lens)
        gate = self.bsmooth(predicted_eval*self.beta)
        if self.simple:
            gate = gate.repeat(1, self.output_size)
        if self.get_typesignature() == ('list', 'list'):
            gate = gate.unsqueeze(1).repeat(1, batch.size(1), 1)
        elif self.get_typesignature() == ('list', 'atom') and is_sequential:
            gate = gate.unsqueeze(1).repeat(1, batch.size(1), 1)
        assert gate.size() == predicted_function2.size() == predicted_function1.size()
        ite_result = gate*predicted_function1 + (1.0 - gate)*predicted_function2
        return ite_result


class SimpleITE(ITE):
    """The simple version of ITE evaluates one function for all dimensions of the output."""

    def __init__(self, input_type, output_type, input_size, output_size, num_units, eval_function=None,
                 function1=None, function2=None, beta=1.0):
        super().__init__(input_type, output_type, input_size, output_size, num_units,
                         eval_function=eval_function, function1=function1, function2=function2, beta=beta,
                         name="SimpleITE", simple=True)


class MultiplyFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, function1=None, function2=None):
        if function1 is None:
            function1 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        submodules = {"function1": function1, "function2": function2}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="Multiply")

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        predicted_function1 = self.submodules["function1"].execute_on_batch(batch)
        predicted_function2 = self.submodules["function2"].execute_on_batch(batch)
        return predicted_function1 * predicted_function2


class AddFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, function1=None, function2=None):
        if function1 is None:
            function1 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        submodules = {"function1": function1, "function2": function2}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="Add")

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        predicted_function1 = self.submodules["function1"].execute_on_batch(batch)
        predicted_function2 = self.submodules["function2"].execute_on_batch(batch)
        return predicted_function1 + predicted_function2


class ContinueFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, fxn=None):
        if fxn is None:
            fxn = init_neural_function("atom", "atom", input_size, output_size, num_units)
        submodules = {"fxn": fxn}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="")

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        fxn_out = self.submodules["fxn"].execute_on_batch(batch)
        return fxn_out


class LearnedConstantFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units):
        super().__init__({}, "atom", "atom", input_size, output_size, num_units, name="LearnedConstant", has_params=True)

    def init_params(self):
        self.parameters = {"constant": torch.rand(self.output_size, requires_grad=True, device=device)}

    def execute_on_batch(self, batch, batch_lens=None):
        return self.parameters["constant"].unsqueeze(0).repeat(batch.size(0), 1)


class AffineFunction(LibraryFunction):

    def __init__(self, raw_input_size, selected_input_size, output_size, num_units, name="Affine"):
        self.selected_input_size = selected_input_size
        super().__init__({}, "atom", "atom", raw_input_size, output_size, num_units, name=name, has_params=True)

    def init_params(self):
        self.linear_layer = nn.Linear(self.selected_input_size, self.output_size, bias=True).to(device)
        self.parameters = {
            "weights": self.linear_layer.weight,
            "bias": self.linear_layer.bias
        }

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        return self.linear_layer(batch)


class AffineFeatureSelectionFunction(AffineFunction):

    def __init__(self, input_size, output_size, num_units, name="AffineFeatureSelection"):
        assert hasattr(self, "full_feature_dim")
        assert input_size >= self.full_feature_dim
        if self.full_feature_dim == 0:
            self.is_full = True
            self.full_feature_dim = input_size
        else:
            self.is_full = False
        additional_inputs = input_size - self.full_feature_dim
        assert hasattr(self, "feature_tensor")
        assert len(self.feature_tensor) <= input_size
        self.feature_tensor = self.feature_tensor.to(device)
        super().__init__(raw_input_size=input_size, selected_input_size=self.feature_tensor.size()[-1]+additional_inputs,
                         output_size=output_size, num_units=num_units, name=name)

    def init_params(self):
        self.raw_input_size = self.input_size
        if self.is_full:
            self.full_feature_dim = self.input_size
            self.feature_tensor = torch.arange(self.input_size).to(device)
        additional_inputs = self.raw_input_size - self.full_feature_dim
        self.selected_input_size = self.feature_tensor.size()[-1] + additional_inputs
        self.linear_layer = nn.Linear(self.selected_input_size, self.output_size, bias=True).to(device)
        self.parameters = {
            "weights": self.linear_layer.weight,
            "bias": self.linear_layer.bias
        }

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        features = torch.index_select(batch, 1, self.feature_tensor)
        remaining_features = batch[:, self.full_feature_dim:]
        return self.linear_layer(torch.cat([features, remaining_features], dim=-1))


class FullInputAffineFunction(AffineFeatureSelectionFunction):

    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = 0  # this will indicate additional_inputs = 0 in FeatureSelectionFunction
        self.feature_tensor = torch.arange(input_size)  # selects all features by default
        super().__init__(input_size, output_size, num_units, name="FullFeatureSelect")


class LogicAndFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, sem, *func_syms, function1=None, function2=None):
        if function1 is None:
            function1 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if sem == "arith":
            self.arith = True   # arith semantics approximation
        else:
            self.arith = False  # min/max semantics approximation
        submodules = {"function1": function1, "function2": function2}
        grammar_symbols = {"function1": func_syms[0], "function2": func_syms[1]}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="and",
                         sub_grammar_symbols=grammar_symbols)

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        #print (f'batch_and = {batch} and batch_lens = {batch.size()}')
        #print (f'type(self.submodules["function1"]) = {self.submodules["function1"]}')
        #print (f'type(self.submodules["function2"]) = {self.submodules["function2"]}')
        predicted_function1 = self.submodules["function1"].execute_on_batch(batch)
        predicted_function2 = self.submodules["function2"].execute_on_batch(batch)
        if self.arith:
            return predicted_function1 * predicted_function2
        else:
            return torch.min(predicted_function1, predicted_function2).to(device)


class LogicOrFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, sem, *func_syms, function1=None, function2=None):
        if function1 is None:
            function1 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if sem == "arith":
            self.arith = True   # arith semantics approximation
        else:
            self.arith = False  # min/max semantics approximation
        submodules = {"function1": function1, "function2": function2}
        grammar_symbols = {"function1": func_syms[0], "function2": func_syms[1]}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="or",
                         sub_grammar_symbols=grammar_symbols)

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        #print (f'batch_or = {batch} and batch_lens = {batch.size()}')
        predicted_function1 = self.submodules["function1"].execute_on_batch(batch)
        predicted_function2 = self.submodules["function2"].execute_on_batch(batch)
        if self.arith:
            return predicted_function1 + predicted_function2 - predicted_function1 * predicted_function2
        else:
            return torch.max(predicted_function1, predicted_function2).to(device)


class LogicXorFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, sem, *func_syms, function1=None, function2=None):
        if function1 is None:
            function1 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if function2 is None:
            function2 = init_neural_function("atom", "atom", input_size, output_size, num_units)
        if sem == "arith":
            self.arith = True   # arith semantics approximation
        else:
            self.arith = False  # min/max semantics approximation
        submodules = {"function1": function1, "function2": function2}
        grammar_symbols = {"function1": func_syms[0], "function2": func_syms[1]}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="xor",
                         sub_grammar_symbols=grammar_symbols)

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        #print (f'batch_xor = {batch} and batch_lens = {batch.size()}')
        predicted_function1 = self.submodules["function1"].execute_on_batch(batch)
        predicted_function2 = self.submodules["function2"].execute_on_batch(batch)
        if self.arith:
            return (predicted_function1 + predicted_function2 - 2 * predicted_function1 * predicted_function2).to(device)
        else:
            return torch.max(torch.min(predicted_function1, 1-predicted_function2).to(device),
                             torch.min(1-predicted_function1, predicted_function2).to(device)).to(device)


class LogicNotFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, sem, *func_syms, function=None):
        if function is None:
            function = init_neural_function("atom", "atom", input_size, output_size, num_units)
        submodules = {"function": function}
        grammar_symbols = {"function": func_syms[0]}
        super().__init__(submodules, "atom", "atom", input_size, output_size, num_units, name="not",
                         sub_grammar_symbols=grammar_symbols)

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        #print (f'batch_not = {batch} and batch_lens = {batch.size()}')
        predicted_function = self.submodules["function"].execute_on_batch(batch)
        return (1 - predicted_function).to(device)


class VarSelFunction(LibraryFunction):

    def __init__(self, input_size, output_size, num_units, var_select_ids, name):
        super().__init__({}, "atom", "atom", input_size, output_size, num_units, name=name)
        self.feature_tensor = torch.tensor([var_select_ids]).to(device)

    def execute_on_batch(self, batch, batch_lens=None):
        assert len(batch.size()) == 2
        #print (f'var_sel = {batch} and batch_lens = {batch.size()} and self.feature_tensor = {self.feature_tensor}')
        features = torch.index_select(batch, 1, self.feature_tensor).to(device)
        #print (f'returned feature = {features}')
        return features
...
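In this snippet, function1 and function2 are the two learned branches of a differentiable program: ITE blends them through a sigmoid gate, while MultiplyFunction, AddFunction, and the logic functions combine their outputs arithmetically. The sketch below reproduces the ITE gating computation in plain PyTorch so it can run standalone; the nn.Linear layers are hypothetical stand-ins for the neural submodules that init_neural_function would normally produce.

import torch
import torch.nn as nn

torch.manual_seed(0)
batch_size, input_size, output_size, beta = 4, 3, 2, 1.0

# Stand-ins for the learned submodules (assumption: the real library builds
# these with init_neural_function, not bare nn.Linear layers).
eval_function = nn.Linear(input_size, output_size)
function1 = nn.Linear(input_size, output_size)
function2 = nn.Linear(input_size, output_size)

batch = torch.randn(batch_size, input_size)

# Smoothed if-then-else: a sigmoid gate blends both branches instead of
# taking a hard branch, which keeps the whole program differentiable.
gate = torch.sigmoid(eval_function(batch) * beta)
ite_result = gate * function1(batch) + (1.0 - gate) * function2(batch)
print(ite_result.shape)  # torch.Size([4, 2])

# The logic functions reuse the same two-branch pattern with fuzzy semantics:
# sem="arith" gives AND = f1*f2 and OR = f1 + f2 - f1*f2; otherwise min/max.
p, q = torch.sigmoid(function1(batch)), torch.sigmoid(function2(batch))
fuzzy_and = torch.min(p, q)  # LogicAndFunction, min/max semantics
fuzzy_or = torch.max(p, q)   # LogicOrFunction, min/max semantics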


diff_equations.py

Source: diff_equations.py (GitHub)

...
    x.append(x0)
    y.append(y0)
    for i in range(num - 1):
        x.append(x[i] + h)
        y.append(y[i] + function1(x[i], y[i]) * h)
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def eulerian_modified_method(function1, function2, beg, end, x0, y0, num):
    x = []
    y = []
    h = (end - beg) / num
    yt = []
    x.append(x0)
    y.append(y0)
    for i in range(num - 1):
        k = function1(x[i], y[i]) * h
        x.append(x[i] + h)
        y.append(y[i] + h * function1(x[i] + h / 2, y[i] + k / 2))
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def eulerian_fourth_method(function1, function2, beg, end, x0, y0, num):
    x = []
    y = []
    h = (end - beg) / num
    yt = []
    x.append(x0)
    y.append(y0)
    for i in range(num - 1):
        k1 = function1(x[i], y[i]) * h
        k2 = function1(x[i] + h / 2, y[i] + k1 / 2) * h
        k3 = function1(x[i] + h / 2, y[i] + k2 / 2) * h
        k4 = function1(x[i] + h, y[i] + k3) * h
        x.append(x[i] + h)
        y.append(y[i] + (k1 + 2 * k2 + 2 * k3 + k4) / 6)
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def runge_kutta_second_grade(function1, function2, beg, end, x0, y0, num, C2):
    x = []
    y = []
    h = (end - beg) / num
    yt = []
    alpha = 1 / (2 * C2)
    C1 = 1 - C2
    x.append(x0)
    y.append(y0)
    k1 = []
    k2 = []
    for i in range(num - 1):
        k1.append(function1(x[i], y[i]) * h)
        k2.append(function1(x[i] + alpha * h, y[i] + alpha * k1[i]) * h)
        x.append(x[i] + h)
        y.append(y[i] + C1 * k1[i] + C2 * k2[i])
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def runge_kutta_third_grade(function1, function2, beg, end, x0, y0, num, alpha2, alpha3):
    x = []
    y = []
    yt = []
    h = (end - beg) / num
    x.append(x0)
    y.append(y0)
    C2 = (alpha3 / 2 - 1 / 3) / (alpha2 * (alpha2 - alpha3))
    C3 = (1 / 2 - C2 * alpha2) / alpha3
    b21 = alpha2
    b32 = 1 / (6 * alpha2 * C3)
    b31 = alpha3 - b32
    C1 = 1 - C2 - C3
    for i in range(num - 1):
        k1 = function1(x[i], y[i]) * h
        k2 = function1(x[i] + alpha2 * h, y[i] + b21 * k1) * h
        k3 = function1(x[i] + alpha3 * h, y[i] + b31 * k1 + b32 * k2) * h
        x.append(x[i] + h)
        y.append(y[i] + C1 * k1 + C2 * k2 + C3 * k3)
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def runge_kutta_fourth_grade(function1, function2, beg, end, x0, y0, num, alpha2, alpha3, alpha4):
    if alpha2 * alpha3 * alpha4 * (alpha3 - alpha2) * (alpha4 - alpha2) * (alpha4 - alpha3) != 0:
        alpha4 = 1
        h = (end - beg) / num
        C2 = (2 * alpha3 - 1) / (12 * alpha2 * (alpha3 - alpha2) * (1 - alpha3))
        C4 = (6 * alpha2 * alpha3 - 4 * alpha2 - 4 * alpha3 + 3) / (12 * (1 - alpha2) * (1 - alpha3))
        b42 = (4 * alpha3 ** 2 - alpha2 - 5 * alpha3 + 2) / (24 * C4 * alpha3 * (alpha3 - alpha2))
        C3 = (1 - 2 * alpha2) / (12 * alpha3 * (1 - alpha2) * (1 - alpha3))
        b43 = (1 - 2 * alpha2) / (24 * C4 * alpha3 * (alpha3 - alpha2))
        b41 = 1 - b42 - b43
        b21 = alpha2
        b32 = 1 / (24 * C4 * b43 * alpha2)
        b31 = alpha3 - b32
        C1 = 1 - C2 - C3 - C4
        y = []
        yt = []
        x = []
        x.append(x0)
        y.append(y0)
        for i in range(num - 1):
            k1 = function1(x[i], y[i]) * h
            k2 = function1(x[i] + alpha2 * h, y[i] + b21 * k1) * h
            k3 = function1(x[i] + alpha3 * h, y[i] + b31 * k1 + b32 * k2) * h
            k4 = function1(x[i] + alpha4 * h, y[i] + b41 * k1 + b42 * k2 + b43 * k3) * h
            x.append(x[i] + h)
            y.append(y[i] + C1 * k1 + C2 * k2 + C3 * k3 + C4 * k4)
        for i in range(num):
            yt.append(function2(x[i]))
        return x, y, yt
    return 0, 0, 0

def runge_kutta_fourth_grade_delta_zero1(function1, function2, beg, end, x0, y0, num, C3):
    alpha2 = 1 / 2
    alpha3 = 0
    alpha4 = 1
    if alpha2 * alpha3 * alpha4 * (alpha3 - alpha2) * (alpha4 - alpha2) * (alpha4 - alpha3) == 0:
        h = (end - beg) / num
        C2 = 2 / 3
        C4 = 1 / 6
        b42 = 3 / 2
        b43 = 6 * C3
        b41 = -1 / 2 - 6 * C3
        b21 = 1 / 2
        b32 = 1 / (12 * C3)
        b31 = -1 / (12 * C3)
        C1 = 1 / 6 - C3
        y = []
        yt = []
        x = []
        x.append(x0)
        y.append(y0)
        for i in range(num - 1):
            k1 = function1(x[i], y[i]) * h
            k2 = function1(x[i] + alpha2 * h, y[i] + b21 * k1) * h
            k3 = function1(x[i] + alpha3 * h, y[i] + b31 * k1 + b32 * k2) * h
            k4 = function1(x[i] + alpha4 * h, y[i] + b41 * k1 + b42 * k2 + b43 * k3) * h
            x.append(x[i] + h)
            y.append(y[i] + C1 * k1 + C2 * k2 + C3 * k3 + C4 * k4)
        for i in range(num):
            yt.append(function2(x[i]))
        return x, y, yt
    else:
        return 0, 0, 0

def runge_kutta_fourth_grade_delta_zero2(function1, function2, beg, end, x0, y0, num, C3):
    alpha2 = 1 / 2
    alpha3 = 1 / 2
    alpha4 = 1
    if alpha2 * alpha3 * alpha4 * (alpha3 - alpha2) * (alpha4 - alpha2) * (alpha4 - alpha3) == 0:
        h = (end - beg) / num
        C2 = 2 / 3 - C3
        C4 = 1 / 6
        b42 = 1 - 3 * C2
        b43 = 3 * C3
        b41 = 0
        b21 = 1 / 2
        b32 = 1 / (6 * C3)
        b31 = 1 / 2 - 1 / (6 * C3)
        C1 = 1 / 6
        y = []
        yt = []
        x = []
        x.append(x0)
        y.append(y0)
        for i in range(num - 1):
            k1 = function1(x[i], y[i]) * h
            k2 = function1(x[i] + alpha2 * h, y[i] + b21 * k1) * h
            k3 = function1(x[i] + alpha3 * h, y[i] + b31 * k1 + b32 * k2) * h
            k4 = function1(x[i] + alpha4 * h, y[i] + b41 * k1 + b42 * k2 + b43 * k3) * h
            x.append(x[i] + h)
            y.append(y[i] + C1 * k1 + C2 * k2 + C3 * k3 + C4 * k4)
        for i in range(num):
            yt.append(function2(x[i]))
        return x, y, yt
    else:
        return 0, 0, 0

def runge_kutta_fourth_grade_delta_zero3(function1, function2, beg, end, x0, y0, num, C4):
    alpha2 = 1
    alpha3 = 1 / 2
    alpha4 = 1
    if alpha2 * alpha3 * alpha4 * (alpha3 - alpha2) * (alpha4 - alpha2) * (alpha4 - alpha3) == 0:
        h = (end - beg) / num
        C2 = 1 / 6 - C4
        C3 = 2 / 3
        b42 = -1 / (6 * C4)
        b43 = 1 / (3 * C4)
        b41 = 1 - 1 / (6 * C4)
        b21 = 1
        b32 = 1 / 8
        b31 = 3 / 8
        C1 = 1 / 6
        y = []
        yt = []
        x = []
        x.append(x0)
        y.append(y0)
        for i in range(num - 1):
            k1 = function1(x[i], y[i]) * h
            k2 = function1(x[i] + alpha2 * h, y[i] + b21 * k1) * h
            k3 = function1(x[i] + alpha3 * h, y[i] + b31 * k1 + b32 * k2) * h
            k4 = function1(x[i] + alpha4 * h, y[i] + b41 * k1 + b42 * k2 + b43 * k3) * h
            x.append(x[i] + h)
            y.append(y[i] + C1 * k1 + C2 * k2 + C3 * k3 + C4 * k4)
        for i in range(num):
            yt.append(function2(x[i]))
        return x, y, yt
    else:
        return 0, 0, 0

def adams_third_grade(function1, function2, beg, end, xn, yn, num):
    h = (end - beg) / num
    y = yn
    x = xn
    yt = []
    for i in range(num - 3):
        y.append(y[-1] + h * (23 * function1(x[-1], y[-1]) / 12 - 4 * function1(x[-2], y[-2]) / 3 +
                              5 * function1(x[-3], y[-3]) / 12))
        x.append(x[i + 2] + h)
    for i in range(num):
        yt.append(function2(x[i]))
    return x, y, yt

def adams_fourth_grade(function1, function2, beg, end, xn, yn, num):
    h = (end - beg) / num
    y = yn
    x = xn
    yt = []
    for i in range(num - 4):
        y.append(y[-1] + h * (55 * function1(x[-1], y[-1]) / 24 - 59 * function1(x[-2], y[-2]) / 24 +
                              37 * function1(x[-3], y[-3]) / 24
                              - 3 * function1(x[-4], y[-4]) / 24))
        # with four seed points x[0..3], step i extends from node x[i + 3]
        x.append(x[i + 3] + h)
    for i in range(num):
        yt.append(function2(x[i]))
...
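Throughout this file, function1 is the right-hand side f(x, y) of the ODE y' = f(x, y) and function2 is the exact solution used to fill the reference list yt. A minimal usage sketch, assuming the snippet above is saved as diff_equations.py on the import path:

import math
from diff_equations import eulerian_modified_method  # assumes the file name shown above

# dy/dx = y with y(0) = 1 has the exact solution y(x) = e**x.
function1 = lambda x, y: y
function2 = lambda x: math.exp(x)

x, y, yt = eulerian_modified_method(function1, function2, beg=0.0, end=1.0,
                                    x0=0.0, y0=1.0, num=100)
# y holds the numerical solution, yt the exact values at the same nodes;
# the modified Euler method is second order, so the gap shrinks like h**2.
print(max(abs(a - b) for a, b in zip(y, yt)))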


test_run.py

Source: test_run.py (GitHub)

...
    def test_run_explicit(self):
        self._test_run_explicit(self._run_explicit_command(self.piped))

    def test_run_no_args(self):
        output = self.run_pysource_script([
            command.source_def('function1(): return 1',
                               piped=self.piped),
            command.run('function1')
        ])
        self.assertEqual(output, '1')

    def test_run_no_return_value(self):
        output = self.run_pysource_script([
            command.source_def('function1(): pass',
                               piped=self.piped),
            command.run('function1')
        ], strip=False)
        self.assertEqual(output, '')

    def test_run_with_args(self):
        name = 'john'
        output = self.run_pysource_script([
            command.source_def('function1(name): return name*2',
                               piped=self.piped),
            command.run('function1', name)
        ])
        self.assertEqual(output, name*2)

    def test_run_with_typed_args(self):
        number = 3
        output = self.run_pysource_script([
            command.source_def('function1(number=int): return number**3',
                               piped=self.piped),
            command.run('function1', number)
        ])
        self.assertEqual(output, str(number**3))

    def test_run_with_varargs(self):
        names = ['john', 'doe']
        output = self.run_pysource_script([
            command.source_def('function1(*names): return list(names+names)',
                               piped=self.piped),
            command.run('function1', *names)
        ])
        self.assertEqual(output, str(names+names))

    def test_run_with_kwargs(self):
        output = self.run_pysource_script([
            command.source_def('function1(**kwargs): return 1',
                               piped=self.piped),
            command.run('function1')
        ])
        self.assertEqual(output, '1')

    def test_run_with_args_and_varargs(self):
        name = 'jane'
        names = ['john', 'doe']
        args = [name] + names
        output = self.run_pysource_script([
            command.source_def('''function1(name, *names):
                return [name]+list(names)''', piped=self.piped),
            command.run('function1', *args)
        ])
        self.assertEqual(output, str(args))

    def test_run_raises_exception(self):
        self.assertRaises(
            sh.ErrorReturnCode,
            self.run_pysource_script,
            [command.source_def('function1(): raise RuntimeError()',
                                piped=self.piped),
             command.run('function1')])

    def test_run_too_many_args_no_varargs(self):
        self.assertRaises(sh.ErrorReturnCode,
                          self.run_pysource_script,
                          [
                              command.source_def('function1(): pass',
                                                 piped=self.piped),
                              command.run('function1', 'arg')
                          ])

    def test_run_too_few_args_no_varargs(self):
        self.assertRaises(sh.ErrorReturnCode,
                          self.run_pysource_script,
                          [
                              command.source_def('function1(arg): pass',
                                                 piped=self.piped),
                              command.run('function1')
                          ])

    def test_run_too_few_args_with_varargs(self):
        self.assertRaises(sh.ErrorReturnCode,
                          self.run_pysource_script,
                          [
                              command.source_def('function1(ar, *args): pass',
                                                 piped=self.piped),
                              command.run('function1')
                          ])

    def test_run_no_function(self):
        self.assertRaises(sh.ErrorReturnCode,
                          self.run_pysource_script,
                          [command.source_named('function1',
                                                piped=self.piped),
                           command.run('function1')])

    def test_run_with_run_piped_mode(self):
        self.assertRaises(sh.ErrorReturnCode,
                          self._test_run_explicit,
                          self._run_explicit_command(not self.piped))

    def test_using_pipes_when_non_piped_mode(self):
        if self.piped:
            return
        import_statement = 'from pysource import stdin, stdout'
        self.run_pysource_script([command.source_inline(import_statement)])
        self.assertRaises(
            sh.ErrorReturnCode,
            self.run_pysource_script,
            [command.source_def('function1(): stdin.read()'),
             command.run('function1')])
        self.assertRaises(
            sh.ErrorReturnCode,
            self.run_pysource_script,
            [command.source_def('function1(): stdout.write("1")'),
             command.run('function1')])

    def _test_run_explicit(self, run_explicit_command):
        output = self.run_pysource_script([
            command.source_def('function1(): return 1',
                               piped=self.piped),
            run_explicit_command('function1')
        ])
        self.assertEqual(output, '1')

    def _run_explicit_command(self, piped):
        if piped:
            return command.run_piped_explicit
        else:
...

