How to use get_free method in avocado

Best Python code snippets using avocado_python

models.py

Source: models.py (GitHub)

...
        self.L2Lambda = None

    def eval_objective(self, free_params):
        pass

    def get_params(self):
        return self.params.get_free()

    def dump_state(self, xk):
        '''
        callback to save the state to disk during optimization
        '''
        filename = 'state.txt'
        if not os.path.exists(filename):
            past = np.zeros((0, xk.shape[0]))
        else:
            past = np.loadtxt(filename)
            if past.ndim < 2:
                past = past.reshape(1, -1)
        np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))

    def fit(self,
            warm_start=True,
            label=None,
            save=False,
            use_fit_L1=False,
            tol=1e-15,
            dump_state=False,
            **kwargs):
        '''
        Trains the model
        '''
        if False and hasattr(self, 'objective_gradient'):
            eval_objective_grad = self.objective_gradient
        else:
            eval_objective_grad = autograd.grad(self.eval_objective)

        eval_objective = lambda theta: self.eval_objective(theta)
        eval_objective_hess = autograd.hessian(self.eval_objective)
        eval_objective_hvp = autograd.hessian_vector_product(self.eval_objective)
        if use_fit_L1:
            fitL1.fit_L1(self, **kwargs)
        else:
            if dump_state:
                callback = self.dump_state
            else:
                callback = None
            opt_res = scipy.optimize.minimize(eval_objective,
                                              jac=eval_objective_grad,
                                              hessp=eval_objective_hvp,
                                              hess=eval_objective_hess,
                                              x0=copy.deepcopy(self.params.get_free()),
                                              callback=callback,
                                              method='trust-ncg',
                                              tol=tol,
                                              options={'initial_trust_radius': 0.1,
                                                       'max_trust_radius': 1,
                                                       'gtol': tol,
                                                       'disp': False,
                                                       'maxiter': 100000})
            self.params.set_free(opt_res.x)
            if np.linalg.norm(opt_res.jac) > .01:
                print('Got grad norm', np.linalg.norm(opt_res.jac))

    # TODO: can we rewrite to avoid rewriting the instance var each time?
    def weighted_model_objective(self, example_weights, free_params):
        ''' The actual objective that we differentiate '''
        self.example_weights = example_weights
        return self.eval_objective(free_params)

    def compute_gradients(self, weights):
        if self.is_a_glm:
            self.compute_derivs()
            grads = (self.D1 * weights)[np.newaxis, :] * self.training_data.X.copy().T
        else:
            dObj_dParams = autograd.jacobian(self.weighted_model_objective, argnum=1)
            d2Obj_dParamsdWeights = autograd.jacobian(dObj_dParams, argnum=0)
            array_box_go_away = self.params.get_free().copy()
            cur_weights = self.example_weights.copy()
            grads = d2Obj_dParamsdWeights(weights,
                                          self.params.get_free())
            self.params.set_free(array_box_go_away)
        return grads

    def compute_dParams_dWeights(self, some_example_weights,
                                 solver_method='cholesky',
                                 non_fixed_dims=None,
                                 rank=-1,
                                 **kwargs):
        '''
        sets self.jacobian = dParams_dxn for each datapoint x_n
        rank = -1 uses a full-rank matrix solve (i.e. np.linalg.solve on the full
        Hessian). A positive integer uses a low rank approximation in
        inverse_hessian_vector_product
        '''
        if non_fixed_dims is None:
            non_fixed_dims = np.arange(self.params.get_free().shape[0])
        if len(non_fixed_dims) == 0:
            self.dParams_dWeights = np.zeros((0, some_example_weights.shape[0]))
            return

        dObj_dParams = autograd.jacobian(self.weighted_model_objective, argnum=1)
        d2Obj_dParams2 = autograd.jacobian(dObj_dParams, argnum=1)
        d2Obj_dParamsdWeights = autograd.jacobian(dObj_dParams, argnum=0)

        # Have to re-copy this into self.params after every autograd call, as
        # autograd turns self.params.get_free() into an ArrayBox (whereas we want
        # it to be a numpy array)
        #array_box_go_away = self.params.get_free().copy()
        #cur_weights = self.example_weights.copy()
        start = time.time()
        grads = self.compute_gradients(some_example_weights)
        X = self.training_data.X
        if solver_method == 'cholesky':
            eval_reg_hess = autograd.hessian(self.regularization)
            tmp = self.params.get_free().copy()
            reg_hess = eval_reg_hess(self.params.get_free())
            reg_hess[-1, :] = 0.0
            reg_hess[:, -1] = 0.0
            self.params.set_free(tmp)
            self.dParams_dWeights = -solvers.ihvp_cholesky(grads,
                                                           X,
                                                           self.D2,
                                                           regularizer_hessian=reg_hess)
        elif solver_method == 'agarwal':
            eval_reg_hess = autograd.hessian(self.regularization)
            tmp = self.params.get_free().copy()
            reg_hess = eval_reg_hess(self.params.get_free())
            reg_hess[-1, :] = 0.0
            reg_hess[:, -1] = 0.0
            self.params.set_free(tmp)
            self.dParams_dWeights = -solvers.ihvp_agarwal(grads,
                                                          X,
                                                          self.D2,
                                                          regularizer_hessian=reg_hess,
                                                          **kwargs)
        elif solver_method == 'lanczos':
            print('NOTE lanczos currently assumes l2 regularization')
            self.dParams_dWeights = -solvers.ihvp_exactEvecs(grads,
                                                             X,
                                                             self.D2,
                                                             rank=rank,
                                                             L2Lambda=self.L2Lambda)
        elif solver_method == 'tropp':
            print('NOTE tropp currently assumes l2 regularization')
            self.dParams_dWeights = -solvers.ihvp_tropp(grads,
                                                        X,
                                                        self.D2,
                                                        L2Lambda=self.L2Lambda,
                                                        rank=rank)

        #self.params.set_free(array_box_go_away)
        #self.example_weights = cur_weights
        self.non_fixed_dims = non_fixed_dims

    def retrain_with_weights(self, new_example_weights,
                             doIJAppx=False, doNSAppx=False,
                             label=None,
                             is_cv=False,
                             non_fixed_dims=None,
                             **kwargs):
        '''
        in_place: updates weights and params based on the new data
        Can do things a bit more efficiently if it's cross-validation; you actually
        don't need to multiply (KxN) times (Nx1) vector; just select the components
        that have been left out
        '''
        if doIJAppx:  # i.e. infinitesimal jackknife approx
            delta_example_weights = new_example_weights - self.example_weights
            if is_cv and False:
                left_out_inds = np.where(delta_example_weights == 0)
                new_params = self.params.get_free()
                new_params += self.dParams_dWeights[:, left_out_inds].sum(axis=1)
            else:
                if non_fixed_dims is None:
                    new_params = self.params.get_free() + self.dParams_dWeights.dot(
                        delta_example_weights)
                else:
                    new_params = self.params.get_free()
                    new_params[non_fixed_dims] += self.dParams_dWeights.dot(
                        delta_example_weights)
        elif doNSAppx:  # i.e., Newton step based approx
            if is_cv and self.is_a_glm:  # Can do rank-1 update
                n = np.where(new_example_weights != 1)[0]
                new_params = self.params.get_free().copy()
                new_params[non_fixed_dims] += self.loocv_rank_one_updates[n, :].squeeze()
            else:
                self.compute_dParams_dWeights(new_example_weights,
                                              non_fixed_dims=non_fixed_dims)
                delta_example_weights = new_example_weights - self.example_weights
                new_params = self.params.get_free().copy()
                new_params[non_fixed_dims] += self.dParams_dWeights.dot(
                    delta_example_weights)
        else:  # non-approximate: re-fit the model
            curr_params = copy.copy(self.params.get_free())
            curr_example_weights = copy.copy(self.example_weights)
            self.example_weights = new_example_weights
            self.fit(**kwargs)
            new_params = copy.copy(self.params.get_free())

        self.params.set_free(new_params)
        return new_params

    def predict_probability(self, X):
        pass

    def get_error(self, test_data, metric):
        pass

    def get_single_datapoint_hessian(self, n):
        X = self.training_data.X
        Y = self.training_data.Y
        weights = self.example_weights
        self.training_data.X = X[n].reshape(1, -1)
        self.training_data.Y = Y[n]
        self.example_weights = np.ones(1)
        array_box_go_away = copy.copy(self.params.get_free())
        dObj_dParams = autograd.jacobian(self.weighted_model_objective, argnum=1)
        d2Obj_dParams2 = autograd.jacobian(dObj_dParams, argnum=1)
        hess_n = d2Obj_dParams2(self.example_weights, self.params.get_free())
        self.params.set_free(array_box_go_away)
        self.training_data.X = X
        self.training_data.Y = Y
        self.example_weights = weights
        return hess_n

    def get_single_datapoint_hvp(self, n, vec):
        '''
        Returns Hessian.dot(vec), where the Hessian is the Hessian of the objective
        function with just datapoint n
        '''
        X = self.training_data.X
        Y = self.training_data.Y
        weights = self.example_weights
        self.training_data.X = X[n].reshape(1, -1)
        self.training_data.Y = Y[n]
        self.example_weights = np.ones(1)

        array_box_go_away = copy.copy(self.params.get_free())
        eval_hvp = autograd.hessian_vector_product(self.weighted_model_objective,
                                                   argnum=1)
        hess_n_dot_vec = eval_hvp(self.example_weights, self.params.get_free(), vec)

        self.params.set_free(array_box_go_away)
        self.training_data.X = X
        self.training_data.Y = Y
        self.example_weights = weights
        return hess_n_dot_vec

    def get_all_data_hvp(self, vec):
        '''
        Returns Hessian.dot(vec), where the Hessian is the Hessian of the objective
        function with all the data.
        '''
        array_box_go_away = copy.copy(self.params.get_free())
        eval_hvp = autograd.hessian_vector_product(self.weighted_model_objective,
                                                   argnum=1)
        hvp = eval_hvp(self.example_weights, self.params.get_free(), vec)

        self.params.set_free(array_box_go_away)
        return hvp

    def compute_hessian(self):
        dObj_dParams = autograd.jacobian(self.weighted_model_objective, argnum=1)
        d2Obj_dParams2 = autograd.jacobian(dObj_dParams, argnum=1)
        array_box_go_away = self.params.get_free().copy()
        hessian = d2Obj_dParams2(self.example_weights, self.params.get_free())
        self.params.set_free(array_box_go_away)
        self.hessian = hessian

    def compute_restricted_hessian_and_dParamsdWeights(self, dims, weights,
                                                       comp_dParams_dWeights=True):
        '''
        Computes the dims.shape[0] by dims.shape[0] Hessian only along the entries
        in dims (used when using l_1 regularization)
        '''
        theta0 = self.params.get_free()

        # Objective to differentiate just along the dimensions specified
        def lowDimObj(weights, thetaOnDims, thetaOffDims, invPerm):
            allDims = np.append(dims, offDims)
            thetaFull = np.append(thetaOnDims, thetaOffDims)[invPerm]
            return self.weighted_model_objective(weights, thetaFull)

        offDims = np.setdiff1d(np.arange(self.params.get_free().shape[0]), dims)
        thetaOnDims = theta0[dims]
        thetaOffDims = theta0[offDims]
        # lowDimObj will concatenate thetaOnDims, thetaOffDims, then needs to
        # un-permute them into the original theta.
        allDims = np.append(dims, offDims)
        invPerm = np.zeros(theta0.shape[0], dtype=np.int32)
        for i, idx in enumerate(allDims):
            invPerm[idx] = i
        evalHess = autograd.hessian(lowDimObj, argnum=1)
        array_box_go_away = self.params.get_free().copy()
        restricted_hess = evalHess(weights,
                                   thetaOnDims,
                                   thetaOffDims,
                                   invPerm)
        self.params.set_free(theta0)
        dObj_dParams = autograd.jacobian(lowDimObj, argnum=1)
        d2Obj_dParamsdWeights = autograd.jacobian(dObj_dParams, argnum=0)
        if comp_dParams_dWeights:
            restricted_dParamsdWeights = d2Obj_dParamsdWeights(weights,
                                                               thetaOnDims,
                                                               thetaOffDims,
                                                               invPerm)
            return restricted_hess, restricted_dParamsdWeights
        else:
            return restricted_hess

    def hessian_inverse_vector_product(self, vec, hessian_scaling,
                                       S1=None, S2=None, method='stochastic'):
        '''
        From Agarwal et. al. "Second-order stochastic optimization for machine
        learning in linear time." 2017.
        Not clear that this provides good accuracy in a reasonable amount of time.
        '''
        N = self.training_data.X.shape[0]
        D = vec.shape[0]
        if S1 is None and S2 is None:
            S1 = int(np.ceil(np.sqrt(N)/10))
            S2 = int(np.ceil(10*np.sqrt(N)))
        hivpEsts = np.zeros((S1, D))
        for ii in range(S1):
            hivpEsts[ii] = vec
            for n in range(1, S2):
                idx = np.random.choice(N)
                #H_n_prod_prev = self.get_single_datapoint_hvp(idx, hivpEsts[ii]) * N
                #H_n_prod_prev /= hessian_scaling
                H_n_prod_prev = self.get_all_data_hvp(hivpEsts[ii]) / hessian_scaling
                hivpEsts[ii] = vec + hivpEsts[ii] - H_n_prod_prev
        return np.mean(hivpEsts, axis=0) / hessian_scaling

    def stochastic_hessian_inverse(self, hessian_scaling, S1=None, S2=None):
        '''
        From Agarwal et. al. "Second-order stochastic optimization for machine
        learning in linear time." 2017.
        Not clear that this provides good accuracy in a reasonable amount of time.
        '''
        self.compute_derivs()
        X = self.training_data.X
        N = self.training_data.X.shape[0]
        D = self.params.get_free().shape[0]
        if S1 is None and S2 is None:
            S1 = int(np.sqrt(N)/10)
            S2 = int(10*np.sqrt(N))
        if self.regularization is not None:
            evalRegHess = autograd.hessian(self.regularization)
            paramsCpy = self.params.get_free().copy()
            regHess = evalRegHess(self.params.get_free())
            regHess[-1, -1] = 0.0
            self.params.set_free(paramsCpy)

        hinvEsts = np.zeros((S1, D, D))
        for ii in range(S1):
            hinvEsts[ii] = np.eye(D)
            for n in range(1, S2):
                idx = np.random.choice(N)
                H_n = np.outer(X[idx], X[idx]) * self.D2[idx] * N + regHess
                if np.linalg.norm(H_n) >= hessian_scaling*0.9999:
                    print(np.linalg.norm(H_n))
                #H_n = self.get_single_datapoint_hessian(idx) * N
                H_n /= hessian_scaling
                hinvEsts[ii] = np.eye(D) + (np.eye(D) - H_n).dot(hinvEsts[ii])
        return np.mean(hinvEsts, axis=0) / hessian_scaling

    def compute_loocv_rank_one_updates(self,
                                       non_fixed_dims=None,
                                       **kwargs):
        '''
        When the model is a GLM and you're doing approximate LOOCV with the Newton
        step approximation, rank one matrix inverse updates allow you to use only
        O(D^3), rather than O(ND^3) computation.
        '''
        X = self.training_data.X
        N = X.shape[0]
        if non_fixed_dims is None:
            non_fixed_dims = np.arange(self.params.get_free().shape[0])
        if len(non_fixed_dims) == 0:
            self.loocv_rank_one_updates = np.zeros((N, 0))
            return

        X_S = X[:, non_fixed_dims]
        hivps = self.inverse_hessian_vector_product(X.T, **kwargs).T
        X_S_hivps = np.einsum('nd,nd->n', X_S, hivps)
        updates = 1 + (self.D2 * X_S_hivps) / (1 - self.D2 * X_S_hivps)
        self.loocv_rank_one_updates = (updates*self.D1)[:, np.newaxis] * hivps

    def inverse_hessian_vector_product(self, b,
                                       solver_method='cholesky',
                                       rank=1,
                                       **kwargs):
        X = self.training_data.X
        if solver_method == 'cholesky':
            eval_reg_hess = autograd.hessian(self.regularization)
            tmp = self.params.get_free().copy()
            reg_hess = eval_reg_hess(self.params.get_free())
            reg_hess[-1, :] = 0.0
            reg_hess[:, -1] = 0.0
            self.params.set_free(tmp)
            return solvers.ihvp_cholesky(b,
                                         X,
                                         self.D2,
                                         regularizer_hessian=reg_hess)
        elif solver_method == 'agarwal':
            eval_reg_hess = autograd.hessian(self.regularization)
            tmp = self.params.get_free().copy()
            reg_hess = eval_reg_hess(self.params.get_free())
            reg_hess[-1, :] = 0.0
            reg_hess[:, -1] = 0.0
            self.params.set_free(tmp)
            return solvers.ihvp_agarwal(b,
                                        X,
                                        self.D2,
                                        regularizer_hessian=reg_hess,
                                        **kwargs)
        elif solver_method == 'lanczos':
            print('NOTE lanczos currently assumes l2 regularization')
            return solvers.ihvp_exactEvecs(b,
                                           X,
                                           self.D2,
                                           rank=rank,
                                           L2Lambda=self.L2Lambda)
        elif solver_method == 'tropp':
            print('NOTE tropp currently assumes l2 regularization')
            return solvers.ihvp_tropp(b,
                                      X,
                                      self.D2,
                                      L2Lambda=self.L2Lambda,
                                      rank=rank)


class PoissonRegressionModel(GeneralizedLinearModel):
    '''
    Poisson regression with:
    y_n ~ Poi( log(1 + e^{<x_n, \theta>}) )
    '''
    def __init__(self, *args, **kwargs):
        super(PoissonRegressionModel, self).__init__(*args, **kwargs)

    def eval_objective(self, free_params):
        self.params.set_free(free_params)
        params = self.params['w'].get()
        Y = self.training_data.Y
        params_x = np.dot(self.training_data.X, params)
        M = np.maximum(params_x, 0.0)
        lam = np.log(np.exp(0-M) + np.exp(params_x-M)) + M
        ret = Y*np.log(lam + 1e-15) - lam
        ll = (-(ret*self.example_weights).sum())
        return ll + self.regularization(params)

    def get_error(self, test_data, metric="mse"):
        if metric == "mse":
            X = test_data.X
            Y = test_data.Y
            params = self.params['w'].get()
            params_x = np.dot(X, params)
            stacked = np.stack([params_x, np.zeros(params_x.shape[0])], axis=0)
            lam = scipy.special.logsumexp(stacked, axis=0)
            Yhat = lam
            return np.mean((Yhat-Y)**2)

    def compute_derivs(self):
        '''
        lazy slow AD-based implementation ... should actually hand-compute
        these for any serious use.
        '''
        Y = self.training_data.Y
        z = self.training_data.X.dot(self.params.get_free())
        f = lambda z, Y: -(Y*np.log(np.log1p(np.exp(z))) - np.log1p(np.exp(z)))
        grad = autograd.grad(f, argnum=0)
        grad2 = autograd.grad(grad)
        self.D1 = np.zeros(Y.shape[0])
        self.D2 = np.zeros(Y.shape[0])
        for n in range(Y.shape[0]):
            self.D1[n] = grad(z[n], Y[n])
            self.D2[n] = grad2(z[n], Y[n])


class ExponentialPoissonRegressionModel(GeneralizedLinearModel):
    '''
    Poisson regression with:
    y_n ~ Poi( e^{<x_n, \theta>} )
    '''
    def __init__(self, *args, **kwargs):
        self.L1Lambda = None
        super(ExponentialPoissonRegressionModel, self).__init__(*args, **kwargs)

    def eval_objective(self, free_params):
        self.params.set_free(free_params)
        params = self.params['w'].get()
        Y = self.training_data.Y
        params_x_bias = np.dot(self.training_data.X, params)
        ret = Y*params_x_bias - np.exp(params_x_bias)
        ll = (-(ret*self.example_weights).sum())

        return ll + self.regularization(params)

    def fit(self, warm_start=True, label=None, save=False,
            use_glmnet=False,
            **kwargs):
        '''
        Note: use_glmnet only works with CV weights (i.e. all 0 or 1)
        '''
        if not use_glmnet:
            super(ExponentialPoissonRegressionModel, self).fit(warm_start,
                                                               label,
                                                               save,
                                                               **kwargs)
        elif use_glmnet:
            from glmnet_py import glmnet
            lambdau = np.array([self.L1Lambda,])
            inds = self.example_weights.astype(np.bool)
            x = self.training_data.X[inds,:].copy()
            y = self.training_data.Y[inds].copy().astype(np.float)
            fit = glmnet(x=x,
                         y=y,
                         family='poisson',
                         standardize=False,
                         lambdau=lambdau,
                         thresh=1e-20,
                         maxit=10e4,
                         alpha=1.0,
                         )

    def compute_derivs(self):
        '''
        For use from fitL1.py.
        '''
        Y = self.training_data.Y
        params = self.params.get_free()
        #exp_params_X = np.exp(self.training_data.X.dot(self.params['w'].get()))
        exp_params_X = np.exp(self.training_data.X.dot(params))
        self.D1 = -(Y - exp_params_X)
        self.D2 = -(-exp_params_X)

    def get_error(self, test_data, metric="mse"):
        if metric == "mse":
            X = test_data.X
            Y = test_data.Y
            params = self.params['w'].get()
            params_x_bias = np.dot(X, params)
            lam = np.exp(params_x_bias)
            Yhat = lam
            return np.mean((Yhat-Y)**2)

    def compute_bounds(self):
        '''
        Used for low-rank CV paper. Assumes l2 regularization.
        Note these bounds are assuming the objective is **not** scaled
        by 1/N
        '''
        X = self.training_data.X
        D1 = self.D1
        D2 = self.D2
        lam = self.L2Lambda
        thetaHat = self.params.get_free()
        thetaThetanBnd = 1/(lam) * np.abs(D1) * np.linalg.norm(X, axis=1)
        exactBnd = np.abs(X.dot(thetaHat)) + thetaThetanBnd * np.linalg.norm(X, axis=1)
        MnBnd = np.exp(exactBnd)
        LipBnd = ((np.linalg.norm(X, axis=1)**2).sum() - np.linalg.norm(X, axis=1)**2) * MnBnd
        self.IJBnd = LipBnd / (lam**3) * D1**2 * np.linalg.norm(X, axis=1)**3 + 1/lam**2 * D2 * np.abs(D1) * np.linalg.norm(X, axis=1)**4
        self.NSBnd = LipBnd / (lam**3) * D1**2 * np.linalg.norm(X, axis=1)**3


class LogisticRegressionModel(GeneralizedLinearModel):

    def __init__(self, *args, **kwargs):
        super(LogisticRegressionModel, self).__init__(*args, **kwargs)

    def fit(self, warm_start=True, label=None, save=False,
            use_glmnet=False,
            **kwargs):
        '''
        Note: use_glmnet only works with CV weights (i.e. all 0 or 1)
        '''
        if not use_glmnet:
            super(LogisticRegressionModel, self).fit(warm_start,
                                                     label,
                                                     save,
                                                     **kwargs)
        elif use_glmnet:
            from glmnet_py import glmnet
            lambdau = np.array([self.L1Lambda / self.training_data.X.shape[0],])
            inds = self.example_weights.astype(np.bool)
            x = self.training_data.X[inds,:-1].copy()
            y = self.training_data.Y[inds].copy().astype(np.float)
            y[np.where(y==-1)] = 0.0
            fit = glmnet(x=x,
                         y=y,
                         family='binomial',
                         standardize=True,
                         lambdau=lambdau,
                         thresh=1e-10,
                         maxit=10e3,
                         alpha=1.0,
                         )
            self.params.set_free(np.append(fit['beta'], 0))
            return

    def eval_objective(self, free_params):
        self.params.set_free(free_params)
        params = self.params['w'].get()
        X = self.training_data.X
        Y = self.training_data.Y
        paramsXY = -Y * (np.dot(X, params))
        M = np.maximum(paramsXY, 0)
        log_likelihood = -(np.log(np.exp(0-M) + np.exp(paramsXY-M)) + M)
        return ( -(log_likelihood*self.example_weights).sum() +
                self.regularization(params) )

    def predict_probability(self, X):
        return utils.sigmoid(X, self.params.get_free())

    def predict_target(self, X):
        probs = self.predict_probability(X)
        probs[np.where(probs > .5)] = 1
        probs[np.where(probs <= .5)] = -1
        return probs

    def compute_derivs(self):
        '''
        For use from fitL1.py
        '''
        Y = self.training_data.Y
        params = self.params.get_free()
        exp_params_XY = np.exp(Y *
                               self.training_data.X.dot(params))
        self.D1 = -Y / (1 + exp_params_XY)
        self.D2 = -Y*self.D1 - (self.D1)**2

    def get_error(self, test_data, metric='log_likelihood'):
        if metric == "accuracy":
            # change Y_Test to 01 if required
            return 1.0 * np.where(
                self.predict_target(test_data.X) != test_data.Y)[0].shape[0] / test_data.N
        elif metric == 'log_likelihood':
            train_data = self.training_data
            weights = self.example_weights
            self.training_data = test_data
            self.example_weights = np.ones(test_data.X.shape[0])
            nll = self.eval_objective(self.params.get_free())
            nll -= self.regularization(self.params.get_free())
            self.training_data = train_data
            self.example_weights = weights
            return nll / test_data.X.shape[0]


class LinearRegressionModel(GeneralizedLinearModel):
    def __init__(self, *args, **kwargs):
        super(LinearRegressionModel, self).__init__(*args, **kwargs)

    def eval_objective(self, free_params):
        '''
        Objective that we minimize; \sum_n w_n f(x_n, \theta) + ||\theta||_2
        '''
        self.params.set_free(free_params)
        params = self.params['w'].get()
        params_x = np.dot(self.training_data.X, params)
        sq_error = (self.training_data.Y - params_x)**2 * self.example_weights
        return sq_error.sum() + self.regularization(params[:-1])

    def get_error(self, test_data, metric="mse"):
        if metric == "mse":
            Yhat = np.dot(test_data.X, self.params.get_free())
            Y = test_data.Y
            return np.mean((Yhat - Y)**2)

    def compute_derivs(self):
        '''
        First and second derivatives of link function, used in fitL1.py
        '''
        Y = self.training_data.Y
        params_x = self.training_data.X.dot(self.params.get_free())
        self.D1 = -2*(Y - params_x)
        self.D2 = 2*np.ones(Y.shape[0])

    def fit(self, warm_start=True, label=None, save=False,
            use_glmnet=False, **kwargs):
        '''
        Note: this only works with CV weights (i.e. all 0 or 1)
        '''
        if not use_glmnet:
            super(LinearRegressionModel, self).fit(warm_start,
                                                   label,
                                                   save,
                                                   **kwargs)
        elif use_glmnet:
            from glmnet_py import glmnet
            inds = self.example_weights.astype(np.bool)
            x = self.training_data.X[inds,:].copy()
            y = self.training_data.Y[inds].copy().astype(np.float)
            lambdau = np.array([self.L1Lambda/(2*x.shape[0]),])

            fit = glmnet(x=x[:,:-1],
                         y=y,
                         family='gaussian',
                         standardize=True,
                         lambdau=lambdau,
                         thresh=1e-10,
                         maxit=10e4,
                         alpha=1.0,
                         )
            self.params.set_free(np.append(np.squeeze(fit['beta']), fit['a0']))


class ProbitRegressionModel(GeneralizedLinearModel):
    def __init__(self, *args, **kwargs):
        super(ProbitRegressionModel, self).__init__(*args, **kwargs)

    def eval_objective(self, free_params):
        self.params.set_free(free_params)
        params_no_bias = self.params['w'].get()[:-1]
        bias = self.params['w'].get()[-1]
        y_x_params = self.training_data.Y * (
            np.dot(self.training_data.X, params_no_bias) + bias)
        log_likelihood = \
            autograd.scipy.stats.norm.logcdf(y_x_params) * self.example_weights
        return -(log_likelihood).sum() + self.regularization(params_no_bias)

    def predict_probability(self, X):
        params_no_bias = self.params.get_free()[:-1]
        bias = self.params.get_free()[-1]
        return autograd.scipy.stats.norm.cdf(X.dot(params_no_bias) + bias)

    def predict_target(self, X):
        probs = self.predict_probability(X)
        probs[np.where(probs > .5)] = 1
        probs[np.where(probs <= .5)] = -1
        return probs

    def get_error(self, test_data, metric="log_likelihood"):
        if metric == "accuracy":
            # change Y_Test to 01 if required
            return np.where(
                self.predict_target(test_data.X) != test_data.Y)[0].shape[0] / test_data.N
        elif metric == 'log_likelihood':
            train_data = self.training_data
            weights = self.example_weights
            self.training_data = test_data
            self.example_weights = np.ones(test_data.X.shape[0])
            nll = self.eval_objective(self.params.get_free())
            self.training_data = train_data
...
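
In models.py above, params.get_free() returns the model's free (unconstrained) parameters as a flat numpy array, and params.set_free() writes a vector back into the same container. Because autograd temporarily turns the stored array into an ArrayBox while differentiating, the snippet repeatedly copies get_free() before an autograd call and restores it afterwards (the array_box_go_away pattern). The sketch below shows that round-trip in isolation; FreeParams is a hypothetical stand-in for the snippet's parameter object, and only numpy and autograd are assumed.

# Minimal sketch of the get_free()/set_free() round-trip used above.
# FreeParams is a hypothetical stand-in for the snippet's parameter object.
import autograd
import autograd.numpy as np

class FreeParams:
    def __init__(self, values):
        self._values = np.asarray(values, dtype=float)

    def get_free(self):
        # flat vector of free (unconstrained) parameters
        return self._values

    def set_free(self, values):
        self._values = values

X = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
Y = np.array([1.0, 2.0, 3.0])
weights = np.ones(3)
params = FreeParams([0.5, -0.5])

def eval_objective(free_params):
    # mirrors the snippet: push the free vector into the container,
    # read it back, and compute a weighted least-squares loss
    params.set_free(free_params)
    theta = params.get_free()
    resid = Y - np.dot(X, theta)
    return (weights * resid**2).sum()

# copy before differentiating and restore afterwards, so params ends up
# holding a plain numpy array rather than an autograd ArrayBox
array_box_go_away = params.get_free().copy()
hess = autograd.hessian(eval_objective)(params.get_free())
params.set_free(array_box_go_away)
print(hess.shape)   # (2, 2)

The same copy/restore bracketing appears around every autograd.hessian, autograd.jacobian and hessian_vector_product call in the snippet.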
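
compute_dParams_dWeights and retrain_with_weights together implement a weight-perturbation (infinitesimal jackknife) approximation: instead of refitting after the per-example weights change, the fitted parameters are moved along dParams_dWeights = -H^{-1} d(gradient)/d(weights), so theta(w + dw) is approximated by params.get_free() + dParams_dWeights.dot(dw). The snippet delegates the Hessian solves to the repository's solvers module; the self-contained numeric sketch below (plain numpy, weighted ridge regression, all names hypothetical) checks the same idea against an exact refit.

# Infinitesimal-jackknife sketch for weighted ridge regression (assumed setup,
# not the repository's code): theta(w+dw) ~= theta(w) + dParams_dWeights @ dw.
import numpy as np

rng = np.random.default_rng(0)
N, D, lam = 50, 3, 1.0
X = rng.normal(size=(N, D))
Y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=N)

def fit(w):
    # exact minimizer of sum_n w_n (y_n - x_n.theta)^2 + lam * ||theta||^2
    H = 2 * (X.T * w) @ X + 2 * lam * np.eye(D)
    return np.linalg.solve(H, 2 * (X.T * w) @ Y)

w = np.ones(N)
theta_hat = fit(w)

# Hessian at the optimum and d(gradient)/d(w_n), which is the per-example
# gradient -2 x_n (y_n - x_n.theta_hat)
H = 2 * (X.T * w) @ X + 2 * lam * np.eye(D)
per_example_grads = -2 * X.T * (Y - X @ theta_hat)         # D x N
dParams_dWeights = -np.linalg.solve(H, per_example_grads)  # D x N

# leave one observation out, as in the cross-validation case of the snippet
dw = np.zeros(N)
dw[7] = -1.0
theta_ij = theta_hat + dParams_dWeights @ dw
theta_exact = fit(w + dw)
print(np.max(np.abs(theta_ij - theta_exact)))  # small approximation error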

axiom.py

Source: axiom.py (GitHub)

...
    for i in range(len(axioms)):
        if check(expr, axioms[i], dict()):
            return i + 1
    return 0


def get_free(expr, variables, free_variables):
    if type(expr) is Variable:
        if expr not in variables.keys():
            free_variables.add(expr.val)
    elif type(expr) is Predicate:
        for val in expr.val:
            get_free(val, variables, free_variables)
    elif type(expr) is Any or type(expr) is Exists:
        if expr.var not in variables.keys():
            variables[expr.var] = 1
        else:
            variables[expr.var] += 1
        get_free(expr.val, variables, free_variables)
        variables[expr.var] -= 1
        if variables[expr.var] == 0:
            variables.pop(expr.var)
    elif isinstance(expr, Unary):
        return get_free(expr.val, variables, free_variables)
    elif isinstance(expr, Binary):
        return get_free(expr.left, variables, free_variables) | get_free(expr.right, variables,
                                                                         free_variables)
    return free_variables


def get_free_variables(expr):
    s = set()
    get_free(expr, dict(), s)
    return s


def free_subtract(axiom, expr, variable, locked_variables, variables):
    if type(axiom) is Variable:
        if axiom != variable:
            return axiom == expr
        if axiom.val in locked_variables:
            return axiom == expr
        else:
            if axiom not in variables:
                freeVariables = get_free_variables(expr)
                if len(freeVariables.intersection(locked_variables)) != 0:
                    return False
                variables[axiom] = expr
                return True
...
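
In axiom.py, get_free walks an expression tree: variables keeps a count of the variables currently bound by enclosing quantifiers, and any variable met outside that set is collected into free_variables; get_free_variables is the wrapper that starts from empty state. A quick way to exercise it is to paste the two functions above into one file together with small stand-in node classes. The Variable/Predicate/Any/Exists classes below are hypothetical, carrying only the attributes get_free touches; the repository's real definitions are not shown in this snippet.

# Hypothetical stand-in classes; assumes get_free and get_free_variables
# from the snippet above are defined in the same file.
class Variable:
    def __init__(self, val):
        self.val = val          # variable name, collected when unbound

class Predicate:
    def __init__(self, val):
        self.val = val          # list of argument expressions

class Any:                      # universal quantifier node
    def __init__(self, var, val):
        self.var = var          # the bound Variable
        self.val = val          # the quantified body

class Exists(Any):              # existential quantifier node
    pass

x, y = Variable('x'), Variable('y')
expr = Any(x, Predicate([x, y]))   # forall x . P(x, y)
print(get_free_variables(expr))    # {'y'}: x is bound, y stays free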
