How to use the set_param_names method in pytest-bdd

Best Python code snippets using pytest-bdd

predictor_theano.py

Source:predictor_theano.py Github

copy

Full Screen

# NOTE(review): reconstructed from a flattened web scrape of predictor_theano.py
# in which the original file's line numbers were fused into the text. The code
# below restores conventional formatting; statement-level content follows the
# visible snippet (original lines 1-314). The snippet is truncated at the end
# ("...") inside TheanoNetFeaturePredictor._get_config — see the hedged note there.
import os
import time
from collections import OrderedDict

import h5py
import lasagne
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T

from visual_dynamics.utils import iter_util
try:
    from visual_dynamics.utils import visualization_theano
except ImportError:
    # visualization is optional; drawing of the net graph is skipped when absent
    visualization_theano = None
from visual_dynamics.utils.config import ConfigObject, from_yaml
from visual_dynamics.utils.container import MultiDataContainer
from visual_dynamics.utils.transformer import Transformer
from . import predictor


class TheanoNetPredictor(predictor.NetPredictor, ConfigObject):
    """Theano/Lasagne-backed network predictor with compiled prediction and
    jacobian functions, parameter (de)serialization via HDF5, and YAML config
    support."""

    def __init__(self, build_net, input_names, input_shapes, transformers=None, name=None, pretrained_fname=None,
                 solvers=None, environment_config=None, policy_config=None, **kwargs):
        """
        Args:
            build_net: Function that builds the net and returns a dict of the network layers, which should contain
                at least all the root layers of the network. Different keys of this dict can map to the same layer.
            input_names: Iterable of names of the input variables (e.g. the image and velocity)
            input_shapes: Iterable of shapes for the image and velocity inputs.
            transformers: Iterable of transformers for the image and velocity inputs.
            name: Name of this net predictor. Defaults to class name.
            pretrained_fname: File name of h5 file with parameters to initialize the parameters of this net. The
                file name could also be an iteration number, in which case the file with the corresponding number
                in the default snapshot directory is used.
            kwargs: Optional arguments that are passed to build_net.
        """
        self.build_net = build_net
        self._kwargs = kwargs
        self.__dict__.update(kwargs)
        predictor.NetPredictor.__init__(self, input_names, input_shapes, transformers=transformers, name=name, backend='theano')
        self.pred_layers = self.build_net(self.preprocessed_input_shapes, **kwargs)
        # also index every named layer reachable from the root layers
        for pred_layer in list(self.pred_layers.values()):
            self.pred_layers.update((layer.name, layer) for layer in lasagne.layers.get_all_layers(pred_layer)
                                    if layer.name is not None)
        self.input_vars = [self.pred_layers[input_name].input_var for input_name in self.input_names
                           if input_name in self.pred_layers]
        # layer_name_aliases = [('x0', 'x'), ('x', 'x0'), ('x0_next', 'x_next'), ('x_next', 'x0_next'),
        #                       ('x0', 'image_curr'), ('x0_next', 'image_next'), ('x0_next_pred', 'image_next_pred')]
        # for name, name_alias in layer_name_aliases:
        #     if name in self.pred_layers and name_alias not in self.pred_layers:
        #         self.pred_layers[name_alias] = self.pred_layers[name]
        # self.pred_vars = OrderedDict(zip(self.pred_layers.keys(),
        #                                  lasagne.layers.get_output(self.pred_layers.values(), deterministic=True)))
        print("Network %s has %d parameters" % (self.name, lasagne.layers.count_params(self.pred_layers.values())))
        self.transformers = transformers or [Transformer() for _ in self.preprocessed_input_shapes]
        # caches of compiled theano functions, keyed by the requested output names
        self.pred_fns = {}
        self.jac_fns = {}
        if pretrained_fname is not None:
            try:
                # an integer-like value selects a snapshot by iteration number
                iter_ = int(pretrained_fname)
                pretrained_fname = '%s_iter_%d_model.h5' % (self.get_snapshot_prefix(), iter_)
            except ValueError:
                pretrained_fname = pretrained_fname.replace('.yaml', '.h5')
            self.copy_from(pretrained_fname)
        # draw net and save to file
        net_graph_fname = os.path.join(self.get_model_dir(), 'net_graph.png')
        if visualization_theano is not None:
            visualization_theano.draw_to_file(self.get_all_layers(), net_graph_fname, output_shape=True, verbose=True)
        self._draw_fig_num = None
        # self.draw()
        self.solvers = solvers or []
        self.environment_config = environment_config
        self.policy_config = policy_config

    def train(self, solver_or_fname):
        """Train this predictor with the given solver (or YAML file naming one),
        checking that environment/policy configs agree across trainings."""
        if isinstance(solver_or_fname, str):
            with open(solver_or_fname) as solver_file:
                solver = from_yaml(solver_file)
        else:
            solver = solver_or_fname
        self.solvers.append(solver)
        data_fnames = solver.train_data_fnames + solver.val_data_fnames
        with MultiDataContainer(data_fnames) as data_container:
            environment_config = data_container.get_info('environment_config')
            policy_config = data_container.get_info('policy_config')
        if self.environment_config:
            if self.environment_config != environment_config:
                raise ValueError('environment config mismatch across trainings:\n%r\n%r'
                                 % (self.environment_config, environment_config))
        else:
            self.environment_config = environment_config
        if self.policy_config:
            if self.policy_config != policy_config:
                raise ValueError('policies config mismatch across trainings:\n%r\n%r'
                                 % (self.policy_config, policy_config))
        else:
            self.policy_config = policy_config
        solver.solve(self)

    def _compile_pred_fn(self, names):
        """Compile and return a theano function producing the outputs `names`."""
        output_layers = [self.pred_layers[name] for name in names]
        pred_vars = lasagne.layers.get_output(output_layers, deterministic=True)
        # only pass the input variables that the requested outputs actually depend on
        input_vars = [input_var for input_var in self.input_vars
                      if input_var in theano.gof.graph.inputs(pred_vars)]
        start_time = time.time()
        print("Compiling prediction function...")
        pred_fn = theano.function(input_vars, pred_vars)
        print("... finished in %.2f s" % (time.time() - start_time))
        return pred_fn

    def predict(self, name_or_names, inputs, preprocessed=False):
        """Evaluate the named output(s) for `inputs`, compiling (and caching) the
        prediction function on first use. Unbatched inputs are handled by adding
        and then squeezing a singleton batch axis."""
        names = tuple(iter_util.flatten_tree(name_or_names))
        batch_size = self.batch_size(inputs, preprocessed=preprocessed)
        if not preprocessed:
            inputs = self.preprocess(inputs)
        inputs = [input_.astype(theano.config.floatX, copy=False) for input_ in inputs]
        if batch_size == 0:
            inputs = [input_[None, ...] for input_ in inputs]
        pred_fn = self.pred_fns.get(names) or self.pred_fns.setdefault(names, self._compile_pred_fn(names))
        preds = pred_fn(*inputs)
        if batch_size == 0:
            preds = [np.squeeze(pred, 0) for pred in preds]
        return iter_util.unflatten_tree(name_or_names, preds)

    def _get_jacobian_var(self, names, wrt_name, mode=None):
        """
        Returns the jacobian expressions and the respective outputs for a single
        data point. Assumes that the inputs being passed have a single leading
        dimension (i.e. batch_size=1).
        """
        output_wrt_vars = lasagne.layers.get_output([self.pred_layers[name] for name in (names + (wrt_name,))],
                                                    deterministic=True)
        output_vars, wrt_var = output_wrt_vars[:-1], output_wrt_vars[-1]
        output_wrt_shapes = lasagne.layers.get_output_shape([self.pred_layers[name] for name in (names + (wrt_name,))])
        output_shapes, wrt_shape = output_wrt_shapes[:-1], output_wrt_shapes[-1]
        output_dim = sum([np.prod(output_shape[1:]) for output_shape in output_shapes])
        if len(wrt_shape) != 2 or wrt_shape[0] not in (1, None):
            # BUG FIX: the original message used %r placeholders but was never
            # formatted, and the adjacent string literals lacked separating spaces.
            raise ValueError("the shape of the wrt variable is %r but the "
                             "variable should be two-dimensional with the "
                             "leading axis being a singleton or None" % (wrt_shape,))
        _, wrt_dim = wrt_shape
        if mode is None:
            # forward mode is cheaper when the wrt variable is smaller than the output
            if wrt_dim < output_dim:
                mode = 'forward'
            else:
                mode = 'reverse'
        if mode in ('fwd', 'forward'):
            # compute jacobian as multiple Rop jacobian-vector product gradients for each index of wrt variable
            output_var = T.concatenate([output_var.flatten() for output_var in output_vars])
            jac_var, _ = theano.scan(lambda eval_points, output_var, wrt_var:
                                     theano.gradient.Rop(output_var, wrt_var, eval_points),
                                     sequences=np.eye(wrt_dim)[:, None, :],
                                     non_sequences=[output_var, wrt_var])
            jac_var = jac_var.T
        elif mode == 'batched':
            # same as forward mode but using batch computations as opposed to scan
            # see https://github.com/Theano/Theano/issues/4087
            output_var = T.concatenate([T.flatten(output_var, outdim=2) for output_var in output_vars], axis=1)
            jac_var = theano.gradient.Rop(output_var, wrt_var, np.eye(wrt_dim))
            input_vars = [input_var for input_var in self.input_vars
                          if input_var in theano.gof.graph.inputs([jac_var])]
            rep_dict = {input_var: T.repeat(input_var, wrt_dim, axis=0) for input_var in input_vars}
            jac_var = theano.clone(jac_var, replace=rep_dict)
            jac_var = jac_var.T
        elif mode in ('rev', 'reverse'):
            # compute jacobian as multiple Lop vector-jacobian product gradients for each index of the output variable
            output_var = T.concatenate([output_var.flatten() for output_var in output_vars])
            jac_var = theano.gradient.jacobian(output_var, wrt_var)
            jac_var = jac_var[:, 0, :]
        elif mode == 'linear':
            # assumes the outputs are linear in wrt_var: evaluate at the zero
            # vector and at each basis vector, and take differences
            jac_vars = []
            for output_var in output_vars:
                input_vars = [input_var for input_var in self.input_vars
                              if input_var in theano.gof.graph.inputs([output_var])]
                # using tensordot to multiply ones with the input_var seems to be faster than using repeat
                # rep_dict = {input_var: T.repeat(input_var, wrt_dim + 1, axis=0)
                #             for input_var in input_vars if input_var != wrt_var}
                rep_dict = {input_var: T.tensordot(T.ones((wrt_dim + 1, 1)), input_var, axes=1)
                            for input_var in input_vars if input_var != wrt_var}
                rep_dict[wrt_var] = np.r_[np.zeros((1, wrt_dim)), np.eye(wrt_dim)].astype(theano.config.floatX)
                rep_output_var = theano.clone(output_var, replace=rep_dict)
                jac_var = rep_output_var[1:] - rep_output_var[0]
                jac_var = jac_var.reshape((wrt_dim, -1, 1)).T
                jac_vars.append(jac_var)
        else:
            raise ValueError('mode can only be fwd, forward, rev, reverse, batched or linear, but %r was given' % mode)
        if mode != 'linear':
            # split the stacked jacobian back into one expression per output
            split_inds = np.r_[0, np.cumsum([np.prod(output_shape[1:]) for output_shape in output_shapes])]
            jac_vars = [jac_var[start_ind:end_ind].reshape((1, -1, wrt_dim))
                        for (start_ind, end_ind) in zip(split_inds[:-1], split_inds[1:])]
        return jac_vars, output_vars

    def _compile_jacobian_fn(self, names, wrt_name, ret_outputs=False, mode=None):
        """Compile and return a theano function computing the jacobians of the
        named outputs with respect to `wrt_name` (optionally also the outputs)."""
        jac_vars, output_vars = self._get_jacobian_var(names, wrt_name, mode=mode)
        if ret_outputs:
            all_vars = jac_vars + output_vars
        else:
            all_vars = jac_vars
        input_vars = [input_var for input_var in self.input_vars
                      if input_var in theano.gof.graph.inputs(all_vars)]
        start_time = time.time()
        print("Compiling jacobian function...")
        jac_fn = theano.function(input_vars, all_vars, on_unused_input='warn')
        print("... finished in %.2f s" % (time.time() - start_time))
        return jac_fn

    def jacobian(self, name_or_names, wrt_name, inputs, preprocessed=False, ret_outputs=False, mode=None):
        """Evaluate jacobians of the named output(s) w.r.t. `wrt_name`. Batches of
        size > 1 are handled by recursing on each single data point, since the
        compiled jacobian function assumes batch_size=1."""
        names = tuple(iter_util.flatten_tree(name_or_names))
        batch_size = self.batch_size(inputs, preprocessed=preprocessed)
        if not preprocessed:
            inputs = self.preprocess(inputs)
        inputs = [input_.astype(theano.config.floatX, copy=False) for input_ in inputs]
        if batch_size in (0, 1):
            if batch_size == 0:
                inputs = [input_[None, :] for input_ in inputs]
            jac_fn_args = (names, wrt_name, ret_outputs, mode)
            jac_fn = self.jac_fns.get(jac_fn_args) or \
                self.jac_fns.setdefault(jac_fn_args, self._compile_jacobian_fn(*jac_fn_args))
            preds = jac_fn(*inputs)
            if batch_size == 0:
                preds = [np.squeeze(pred, 0) for pred in preds]
        else:
            batched_preds = [self.jacobian(names, wrt_name, single_inputs,
                                           preprocessed=True, ret_outputs=ret_outputs,
                                           mode=mode) for single_inputs in zip(*inputs)]
            if ret_outputs:
                batched_preds = [sum(preds, []) for preds in batched_preds]
            preds = [np.array(pred) for pred in zip(*batched_preds)]
        if ret_outputs:
            return iter_util.unflatten_tree([name_or_names, name_or_names], preds)
        else:
            return iter_util.unflatten_tree(name_or_names, preds)

    def get_all_layers(self):
        """Return all layers reachable from the root layers, without duplicates."""
        layers = []
        for pred_layer in list(self.pred_layers.values()):
            layers.extend(lasagne.layers.get_all_layers(pred_layer))
        return lasagne.utils.unique(layers)

    def get_all_params(self, **tags):
        """Return an OrderedDict mapping parameter names to shared variables.

        Raises:
            ValueError: if two parameters share the same name.
        """
        params = lasagne.layers.get_all_params(self.pred_layers.values(), **tags)
        params_dict = OrderedDict([(param.name, param) for param in params])
        if len(params_dict) != len(params):
            raise ValueError('parameters do not have unique names')
        return params_dict

    def get_all_param_values(self, **tags):
        """Return an OrderedDict mapping parameter names to their numeric values."""
        params_dict = self.get_all_params(**tags)
        param_values_dict = OrderedDict([(name, param.get_value()) for (name, param) in params_dict.items()])
        return param_values_dict

    def set_all_param_values(self, param_values_dict, **tags):
        """Set parameter values by name, skipping names this net doesn't have.

        Raises:
            ValueError: if a value's shape doesn't match the parameter's shape.
        """
        params_dict = self.get_all_params(**tags)
        set_param_names = []
        skipped_param_names = []
        for name, value in param_values_dict.items():
            try:
                param = params_dict[name]
            except KeyError:
                skipped_param_names.append(name)
                continue
            if param.get_value().shape != value.shape:
                raise ValueError('mismatch: parameter has shape %r but value to set has shape %r' %
                                 (param.get_value().shape, value.shape))
            param.set_value(value)
            set_param_names.append(name)
        if skipped_param_names:
            print('skipped parameters with names: %r' % skipped_param_names)
        print('set parameters with names: %r' % set_param_names)

    def save_model(self, model_fname):
        """Save all parameter values to an HDF5 file and return its file name."""
        model_fname = model_fname.replace('.yaml', '.h5')
        all_param_values = self.get_all_param_values()
        print("Saving model parameters to file", model_fname)
        with h5py.File(model_fname, 'w') as h5_file:
            for name, value in all_param_values.items():
                h5_file.create_dataset(name, data=value)
        return model_fname

    def copy_from(self, model_fname):
        """Load parameter values by name from an HDF5 file into this net."""
        print("Copying model parameters from file", model_fname)
        with h5py.File(model_fname, 'r') as h5_file:
            param_values = dict()
            for name in h5_file.keys():
                param_values[name] = h5_file[name][:]
        param_values = OrderedDict([(name, value.astype(theano.config.floatX, copy=False))
                                    for (name, value) in param_values.items()])
        self.set_all_param_values(param_values)

    def draw(self):
        """Display the previously saved net graph image in a matplotlib figure."""
        net_graph_fname = os.path.join(self.get_model_dir(), 'net_graph.png')
        with open(net_graph_fname, 'rb') as net_graph_file:
            image = plt.imread(net_graph_file)
        plt.ion()
        fig = plt.figure(num=self._draw_fig_num, figsize=(10. * image.shape[1] / image.shape[0], 10.),
                         tight_layout=True)
        self._draw_fig_num = fig.number
        plt.axis('off')
        fig.canvas.set_window_title('Net graph for %s' % self.name)
        plt.imshow(image)
        plt.draw()

    def _get_config(self):
        """Return the YAML-serializable config dict for this predictor."""
        config = ConfigObject._get_config(self)
        config.update({'build_net': self.build_net,
                       'input_names': self.input_names,
                       'input_shapes': self.input_shapes,
                       'transformers': self.transformers,
                       'name': self.name,
                       'solvers': self.solvers,
                       'environment_config': self.environment_config,
                       'policy_config': self.policy_config})
        config.update(self._kwargs)
        return config


class TheanoNetFeaturePredictor(TheanoNetPredictor, predictor.FeaturePredictor):
    """Feature predictor that augments TheanoNetPredictor with feature/control
    names and a feature_jacobian convenience method."""

    def __init__(self, build_net, input_names, input_shapes, feature_name,
                 next_feature_name, control_name, feature_jacobian_name=None,
                 transformers=None, name=None, pretrained_fname=None, **kwargs):
        TheanoNetPredictor.__init__(
            self, build_net, input_names, input_shapes,
            transformers=transformers, name=name,
            pretrained_fname=pretrained_fname, **kwargs)
        predictor.FeaturePredictor.__init__(
            self, input_names, input_shapes, feature_name, next_feature_name,
            control_name, feature_jacobian_name=feature_jacobian_name,
            transformers=transformers, name=name)

    def feature_jacobian(self, inputs, preprocessed=False, mode=None):
        """Return (jacobian, next_feature), using a dedicated jacobian output
        layer when one was named, else differentiating through the net."""
        assert len(inputs) == 2
        if self.feature_jacobian_name:
            jac, next_feature = \
                self.predict([self.feature_jacobian_name, self.next_feature_name],
                             inputs, preprocessed=preprocessed)
        else:
            jac, next_feature = self.jacobian(self.next_feature_name, self.control_name,
                                              inputs, preprocessed=preprocessed,
                                              ret_outputs=True, mode=mode)
        return jac, next_feature

    def _get_config(self):
        config = dict(TheanoNetPredictor._get_config(self))
        config.update(predictor.FeaturePredictor._get_config(self))
        # NOTE(review): the scraped snippet is truncated here ("..."); the
        # original presumably ends by returning the merged config — confirm
        # against the upstream visual_dynamics repository.
        return config

Full Screen

Full Screen

automock.py

Source:automock.py Github

copy

Full Screen

# NOTE(review): reconstructed from a flattened web scrape of automock.py
# (original lines 191-297). The snippet starts mid-class: the class header and
# earlier methods (e.g. extract_typedecl, self.cgen) are outside the visible
# excerpt, so the methods below are shown unindented with explicit `self`.
# Names such as TypeDecl, PtrDecl, ArrayDecl, Struct, Enum, IdentifierType,
# Typename (pycparser c_ast) and MockInfo, ArgInfo, ReturnHint, ArgHint are
# defined elsewhere in the original file.

def funcdecl_params(self, funcdecl):
    # NOTE(review): the def line for this method falls outside the visible
    # snippet (only its return statement, original line 191, is shown); the
    # signature is inferred from the call site in make_mock — confirm upstream.
    return funcdecl.args.params if funcdecl.args else []


def extract_funcname(self, funcdecl):
    """Return the declared name of *funcdecl* via its TypeDecl."""
    return self.extract_typedecl(funcdecl).declname


def make_mock(self, typedefs, funcdecl):
    """Build a MockInfo describing the mock to generate for *funcdecl*."""
    # Make a copy so I can modify it with self.set_param_names()
    funcdecl = copy.deepcopy(funcdecl)
    funcname = self.extract_funcname(funcdecl)
    returntype = funcdecl.type
    typedef = self.find_typedef(typedefs, returntype)
    if typedef:
        return_hint = self.select_return_hint(typedef)
    else:
        return_hint = self.select_return_hint(returntype)
    returntextnode = self.make_return_text_node(returntype)
    params = self.funcdecl_params(funcdecl)
    arg_names = self.arg_names(params)
    arg_hints = self.arg_hints(typedefs, params)
    self.set_param_names(params, arg_names, arg_hints)
    args_info = self.make_args_info(arg_names, arg_hints)
    return MockInfo(mockname="mock_" + funcname,
                    funcname=funcname,
                    prototype=self.cgen.visit(funcdecl),
                    return_text=self.cgen.visit(returntextnode),
                    return_hint=return_hint,
                    args_info=args_info)


def find_typedef(self, typedefs, type):
    """Return the typedef'd type for *type*'s base identifier, or None."""
    if isinstance(type, TypeDecl):
        basetype = type.type
        if isinstance(basetype, IdentifierType):
            typename = ' '.join(basetype.names)
            if typename in typedefs:
                return typedefs[typename]
    return None


def select_return_hint(self, returntype):
    """Classify *returntype* as POINTER, BLOB, PRIMITIVE or VOID."""
    if isinstance(returntype, PtrDecl):
        return ReturnHint.POINTER
    elif isinstance(returntype, TypeDecl):
        basetype = returntype.type
        if isinstance(basetype, Struct):
            return ReturnHint.BLOB
        elif isinstance(basetype, Enum):
            return ReturnHint.PRIMITIVE
        elif isinstance(basetype, IdentifierType):
            if basetype.names == ['void']:
                return ReturnHint.VOID
            else:
                return ReturnHint.PRIMITIVE
    else:
        # FIXME: Throw a more specific exception
        raise Exception("Couldn't match return type to hint")


# CGenerator won't correctly print a type-name if passed a
# modifier like PtrDecl or ArrayDecl, unless it is wrapped in a
# Typename or certain other kinds of nodes.
def make_return_text_node(self, returntype):
    """Wrap a copy of *returntype* in an anonymous Typename for printing."""
    returntextnode = Typename(name=None,
                              quals=[],
                              type=copy.deepcopy(returntype))
    self.set_declname(returntextnode, None)
    return returntextnode


def set_declname(self, node, name):
    """Set the declname of the TypeDecl nested inside *node*."""
    typedecl = self.extract_typedecl(node)
    typedecl.declname = name


def arg_names(self, params):
    """Generate positional placeholder argument names for *params*."""
    return ['__pfstest_arg_%s' % i for i in range(len(params))]


def arg_hints(self, typedefs, params):
    """Classify each parameter; a lone void parameter yields an empty list."""
    hints = []
    for param in params:
        typedef = self.find_typedef(typedefs, param.type)
        type = typedef or param.type
        hints.append(self.select_arg_hint(type))
    if ArgHint.VOID in hints:
        return []
    return hints


def select_arg_hint(self, paramtype):
    """Classify *paramtype* as VOID, BLOB or POINTER."""
    if isinstance(paramtype, TypeDecl):
        basetype = paramtype.type
        if isinstance(basetype, IdentifierType):
            if basetype.names == ['void']:
                return ArgHint.VOID
            else:
                return ArgHint.BLOB
        elif isinstance(basetype, Struct):
            return ArgHint.BLOB
        elif isinstance(basetype, Enum):
            return ArgHint.BLOB
        else:
            # FIXME: Throw a more specific exception
            raise Exception("Couldn't match param type to hint")
    elif isinstance(paramtype, PtrDecl):
        return ArgHint.POINTER
    elif isinstance(paramtype, ArrayDecl):
        return ArgHint.POINTER


def set_param_names(self, params, names, hints):
    """Rename each parameter declaration in *params* to the matching name."""
    for (param, name, hint) in zip(params, names, hints):
        self.set_declname(param, name)


def make_args_info(self, names, hints):
    """Pair each argument name with its hint."""
    return [ArgInfo(name, hint)
            for (name, hint) in zip(names, hints)]


def enum(name, fields):
    """Create a simple enum-like class *name* from space-separated *fields*."""
    class EnumValue:
        def __init__(self, clsname, name):
            self.repr = '%s.%s' % (clsname, name)

        def __repr__(self):
            return self.repr
    dict_ = dict((field, EnumValue(name, field))
                 for field in fields.split(' '))
    return type(name, (), dict_)

Full Screen

Full Screen

layer.py

Source:layer.py Github

copy

Full Screen

# NOTE(review): reconstructed from a flattened web scrape of layer.py (original
# lines 83-102). The snippet is the interior of a class whose header is not
# visible; `get_expr` and `LayerParameter` are defined elsewhere in the
# original file.

expr = property(get_expr)  # NOTE(review): `get_expr` is defined above the visible excerpt


def set_name(self, name):
    """Rename this layer and refresh the derived names of its parameters."""
    self.name = name
    self.set_param_names()


def set_param_names(self):
    """Prefix each parameter's symbol name with this layer's name."""
    for param in self.params:
        param.name = "{0}_{1}".format(self.name, param._symbol_name)


@property
def params(self):
    """Return the layer parameters: public attributes holding a LayerParameter."""
    # find layer parameters
    attrs = list(self.__dict__.keys())

    def f(x):
        return (not x.startswith("_") and
                isinstance(self.__dict__[x], LayerParameter))

    p_attrs = list(filter(f, attrs))
    params = list(map(lambda x: self.__dict__[x], p_attrs))
    return params

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run pytest-bdd automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful