How to use log_kernel method in autotest

Best Python code snippet using autotest_python

svgd.py

Source: svgd.py (GitHub)

# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
from abc import ABCMeta, abstractmethod
import torch
from torch.distributions import biject_to
import pyro
from pyro import poutine
from pyro.distributions import Delta
from pyro.distributions.util import copy_docs_from
from pyro.infer.autoguide.guides import AutoContinuous
from pyro.infer.autoguide.initialization import init_to_sample
from pyro.infer.trace_elbo import Trace_ELBO
def vectorize(fn, num_particles, max_plate_nesting):
    def _fn(*args, **kwargs):
        with pyro.plate(
            "num_particles_vectorized", num_particles, dim=-max_plate_nesting - 1
        ):
            return fn(*args, **kwargs)
    return _fn
class _SVGDGuide(AutoContinuous):
    """
    This modification of :class:`AutoContinuous` is used internally in the
    :class:`SVGD` inference algorithm.
    """
    def __init__(self, model):
        super().__init__(model, init_loc_fn=init_to_sample)
    def get_posterior(self, *args, **kwargs):
        svgd_particles = pyro.param("svgd_particles", self._init_loc)
        return Delta(svgd_particles, event_dim=1)
class SteinKernel(object, metaclass=ABCMeta):
    """
    Abstract class for kernels used in the :class:`SVGD` inference algorithm.
    """
    @abstractmethod
    def log_kernel_and_grad(self, particles):
        """
        Compute the component kernels and their gradients.
        :param particles: a tensor with shape (N, D)
        :returns: A pair (`log_kernel`, `kernel_grad`) where `log_kernel` is a (N, N, D)-shaped
            tensor equal to the logarithm of the kernel and `kernel_grad` is a (N, N, D)-shaped
            tensor where the entry (n, m, d) represents the derivative of `log_kernel` w.r.t.
            x_{m,d}, where x_{m,d} is the d^th dimension of particle m.
        """
        raise NotImplementedError
@copy_docs_from(SteinKernel)
class RBFSteinKernel(SteinKernel):
    """
    A RBF kernel for use in the SVGD inference algorithm. The bandwidth of the kernel is chosen from the
    particles using a simple heuristic as in reference [1].
    :param float bandwidth_factor: Optional factor by which to scale the bandwidth, defaults to 1.0.
    :ivar float ~.bandwidth_factor: Property that controls the factor by which to scale the bandwidth
        at each iteration.
    References
    [1] "Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm,"
        Qiang Liu, Dilin Wang
    """
    def __init__(self, bandwidth_factor=None):
        """
        :param float bandwidth_factor: Optional factor by which to scale the bandwidth
        """
        self.bandwidth_factor = bandwidth_factor
    def _bandwidth(self, norm_sq):
        """
        Compute the bandwidth along each dimension using the median pairwise squared distance between particles.
        """
        num_particles = norm_sq.size(0)
        index = torch.arange(num_particles)
        norm_sq = norm_sq[index > index.unsqueeze(-1), ...]
        median = norm_sq.median(dim=0)[0]
        if self.bandwidth_factor is not None:
            median = self.bandwidth_factor * median
        assert median.shape == norm_sq.shape[-1:]
        return median / math.log(num_particles + 1)
    @torch.no_grad()
    def log_kernel_and_grad(self, particles):
        delta_x = particles.unsqueeze(0) - particles.unsqueeze(1)  # N N D
        assert delta_x.dim() == 3
        norm_sq = delta_x.pow(2.0)  # N N D
        h = self._bandwidth(norm_sq)  # D
        log_kernel = -(norm_sq / h)  # N N D
        grad_term = 2.0 * delta_x / h  # N N D
        assert log_kernel.shape == grad_term.shape
        return log_kernel, grad_term
    @property
    def bandwidth_factor(self):
        return self._bandwidth_factor
    @bandwidth_factor.setter
    def bandwidth_factor(self, bandwidth_factor):
        """
        :param float bandwidth_factor: Optional factor by which to scale the bandwidth
        """
        if bandwidth_factor is not None:
            assert bandwidth_factor > 0.0, "bandwidth_factor must be positive."
        self._bandwidth_factor = bandwidth_factor
@copy_docs_from(SteinKernel)
class IMQSteinKernel(SteinKernel):
    r"""
    An IMQ (inverse multi-quadratic) kernel for use in the SVGD inference algorithm [1]. The bandwidth of the kernel
    is chosen from the particles using a simple heuristic as in reference [2]. The kernel takes the form
    :math:`K(x, y) = (\alpha + ||x-y||^2/h)^{\beta}`
    where :math:`\alpha` and :math:`\beta` are user-specified parameters and :math:`h` is the bandwidth.
    :param float alpha: Kernel hyperparameter, defaults to 0.5.
    :param float beta: Kernel hyperparameter, defaults to -0.5.
    :param float bandwidth_factor: Optional factor by which to scale the bandwidth, defaults to 1.0.
    :ivar float ~.bandwidth_factor: Property that controls the factor by which to scale the bandwidth
        at each iteration.
    References
    [1] "Stein Points," Wilson Ye Chen, Lester Mackey, Jackson Gorham, Francois-Xavier Briol, Chris. J. Oates.
    [2] "Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm," Qiang Liu, Dilin Wang
    """
    def __init__(self, alpha=0.5, beta=-0.5, bandwidth_factor=None):
        """
        :param float alpha: Kernel hyperparameter, defaults to 0.5.
        :param float beta: Kernel hyperparameter, defaults to -0.5.
        :param float bandwidth_factor: Optional factor by which to scale the bandwidth
        """
        assert alpha > 0.0, "alpha must be positive."
        assert beta < 0.0, "beta must be negative."
        self.alpha = alpha
        self.beta = beta
        self.bandwidth_factor = bandwidth_factor
    def _bandwidth(self, norm_sq):
        """
        Compute the bandwidth along each dimension using the median pairwise squared distance between particles.
        """
        num_particles = norm_sq.size(0)
        index = torch.arange(num_particles)
        norm_sq = norm_sq[index > index.unsqueeze(-1), ...]
        median = norm_sq.median(dim=0)[0]
        if self.bandwidth_factor is not None:
            median = self.bandwidth_factor * median
        assert median.shape == norm_sq.shape[-1:]
        return median / math.log(num_particles + 1)
    @torch.no_grad()
    def log_kernel_and_grad(self, particles):
        delta_x = particles.unsqueeze(0) - particles.unsqueeze(1)  # N N D
        assert delta_x.dim() == 3
        norm_sq = delta_x.pow(2.0)  # N N D
        h = self._bandwidth(norm_sq)  # D
        base_term = self.alpha + norm_sq / h
        log_kernel = self.beta * torch.log(base_term)  # N N D
        grad_term = (-2.0 * self.beta) * delta_x / h  # N N D
        grad_term = grad_term / base_term
        assert log_kernel.shape == grad_term.shape
        return log_kernel, grad_term
    @property
    def bandwidth_factor(self):
        return self._bandwidth_factor
    @bandwidth_factor.setter
    def bandwidth_factor(self, bandwidth_factor):
        """
        :param float bandwidth_factor: Optional factor by which to scale the bandwidth
        """
        if bandwidth_factor is not None:
            assert bandwidth_factor > 0.0, "bandwidth_factor must be positive."
        self._bandwidth_factor = bandwidth_factor
class SVGD:
    """
    A basic implementation of Stein Variational Gradient Descent as described in reference [1].
    :param model: The model (callable containing Pyro primitives). Model must be fully vectorized
        and may only contain continuous latent variables.
    :param kernel: a SVGD compatible kernel like :class:`RBFSteinKernel`.
    :param optim: A wrapper for a PyTorch optimizer.
    :type optim: pyro.optim.PyroOptim
    :param int num_particles: The number of particles used in SVGD.
    :param int max_plate_nesting: The max number of nested :func:`pyro.plate` contexts in the model.
    :param str mode: Whether to use a Kernelized Stein Discrepancy that makes use of `multivariate`
        test functions (as in [1]) or `univariate` test functions (as in [2]). Defaults to `univariate`.
    Example usage:
    .. code-block:: python
        from pyro.infer import SVGD, RBFSteinKernel
        from pyro.optim import Adam
        kernel = RBFSteinKernel()
        adam = Adam({"lr": 0.1})
        svgd = SVGD(model, kernel, adam, num_particles=50, max_plate_nesting=0)
        for step in range(500):
            svgd.step(model_arg1, model_arg2)
        final_particles = svgd.get_named_particles()
    References
    [1] "Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm,"
        Qiang Liu, Dilin Wang
    [2] "Kernelized Complete Conditional Stein Discrepancy,"
        Raghav Singhal, Saad Lahlou, Rajesh Ranganath
    """
    def __init__(
        self, model, kernel, optim, num_particles, max_plate_nesting, mode="univariate"
    ):
        assert callable(model)
        assert isinstance(kernel, SteinKernel), "Must provide a valid SteinKernel"
        assert isinstance(
            optim, pyro.optim.PyroOptim
        ), "Must provide a valid Pyro optimizer"
        assert num_particles > 1, "Must use at least two particles"
        assert max_plate_nesting >= 0
        assert mode in [
            "univariate",
            "multivariate",
        ], "mode must be one of (univariate, multivariate)"
        self.model = vectorize(model, num_particles, max_plate_nesting)
        self.kernel = kernel
        self.optim = optim
        self.num_particles = num_particles
        self.max_plate_nesting = max_plate_nesting
        self.mode = mode
        self.loss = Trace_ELBO().differentiable_loss
        self.guide = _SVGDGuide(self.model)
    def get_named_particles(self):
        """
        Create a dictionary mapping name to vectorized value, of the form ``{name: tensor}``.
        The leading dimension of each tensor corresponds to particles, i.e. this creates a struct of arrays.
        """
        return {
            site["name"]: biject_to(site["fn"].support)(unconstrained_value)
            for site, unconstrained_value in self.guide._unpack_latent(
                pyro.param("svgd_particles")
            )
        }
    @torch.no_grad()
    def step(self, *args, **kwargs):
        """
        Computes the SVGD gradient, passing args and kwargs to the model,
        and takes a gradient step.
        :return dict: A dictionary of the form {name: float}, where each float
            is a mean squared gradient. This can be used to monitor the convergence of SVGD.
        """
        # compute gradients of log model joint
        with torch.enable_grad(), poutine.trace(param_only=True) as param_capture:
            loss = self.loss(self.model, self.guide, *args, **kwargs)
            loss.backward()
        # get particles used in the _SVGDGuide and reshape to have num_particles leading dimension
        particles = pyro.param("svgd_particles").unconstrained()
        reshaped_particles = particles.reshape(self.num_particles, -1)
        reshaped_particles_grad = particles.grad.reshape(self.num_particles, -1)
        # compute kernel ingredients
        log_kernel, kernel_grad = self.kernel.log_kernel_and_grad(reshaped_particles)
        if self.mode == "multivariate":
            kernel = log_kernel.sum(-1).exp()
            assert kernel.shape == (self.num_particles, self.num_particles)
            attractive_grad = torch.mm(kernel, reshaped_particles_grad)
            repulsive_grad = torch.einsum("nm,nm...->n...", kernel, kernel_grad)
        elif self.mode == "univariate":
            kernel = log_kernel.exp()
            assert kernel.shape == (
                self.num_particles,
                self.num_particles,
                reshaped_particles.size(-1),
            )
            attractive_grad = torch.einsum(
                "nmd,md->nd", kernel, reshaped_particles_grad
            )
            repulsive_grad = torch.einsum("nmd,nmd->nd", kernel, kernel_grad)
        # combine the attractive and repulsive terms in the SVGD gradient
        assert attractive_grad.shape == repulsive_grad.shape
        particles.grad = (attractive_grad + repulsive_grad).reshape(
            particles.shape
        ) / self.num_particles
        # compute per-parameter mean squared gradients
        squared_gradients = {
            site["name"]: value.mean().item()
            for site, value in self.guide._unpack_latent(particles.grad.pow(2.0))
        }
        # torch.optim objects gets instantiated for any params that haven't been seen yet
        params = set(
            site["value"].unconstrained() for site in param_capture.trace.nodes.values()
        )
        self.optim(params)
        # zero gradients
        pyro.infer.util.zero_grads(params)
        # return per-parameter mean squared gradients to user...
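
In this snippet, `log_kernel` is the first tensor returned by a kernel's `log_kernel_and_grad` method; `SVGD.step` exponentiates it (summing over the dimension axis in `multivariate` mode) to build the attractive and repulsive terms of the particle update. Below is a minimal sketch of calling the method directly, using the `RBFSteinKernel` import shown in the docstring above and an arbitrary toy particle tensor:

import torch
from pyro.infer import RBFSteinKernel

particles = torch.randn(4, 3)  # N=4 particles of dimension D=3 (toy values)
kernel = RBFSteinKernel()
log_kernel, kernel_grad = kernel.log_kernel_and_grad(particles)
print(log_kernel.shape)   # torch.Size([4, 4, 3]) -- (N, N, D) log-kernel values
print(kernel_grad.shape)  # torch.Size([4, 4, 3]) -- (N, N, D) derivatives w.r.t. x_{m,d}

The `IMQSteinKernel` defined above exposes the same `log_kernel_and_grad` interface, so it can be swapped in without changing the calling code.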

losses.py

Source: losses.py (GitHub)

from keras.losses import binary_crossentropy
import keras.backend as K
import cv2
import numpy as np
import tensorflow as tf
def dice_loss(y_true, y_pred):
    #smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + (1 - dice_loss(y_true, y_pred))
def gauss2D(shape=(3,3),sigma=0.5):
    m, n = [(ss-1.)/2. for ss in shape]
    y, x = np.ogrid[-m:m+1,-n:n+1]
    h = np.exp(-(x*x + y*y) / (2.*sigma*sigma))
    h[h < np.finfo(h.dtype).eps*h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h
log = np.array([
    [0.0448, 0.0468, 0.0564, 0.0468, 0.0448],
    [0.0468, 0.3167, 0.7146, 0.3167, 0.0468],
    [0.0564, 0.7146, -4.9048, 0.7146, 0.0564],
    [0.0468, 0.3167, 0.7146, 0.3167, 0.0468],
    [0.0448, 0.0468, 0.0564, 0.0468, 0.0448]]).astype(np.float32)
def weights_mask(mask):
    log_kernel = tf.convert_to_tensor(log)
    log_kernel = tf.reshape(log_kernel, [5, 5, 1, 1])
    log_kernel = tf.to_float(log_kernel)
    mask = tf.to_float(mask)
    edges = tf.nn.conv2d(mask, log_kernel, padding='SAME', strides=[1, 1, 1, 1])
    edges = edges > 0.95
    edges = tf.to_float(edges)
    gauss_kernel = tf.convert_to_tensor(gauss2D((5, 5), 2))
    gauss_kernel = tf.reshape(gauss_kernel, [5, 5, 1, 1])
    gauss_kernel = tf.to_float(gauss_kernel)
    return tf.nn.conv2d(edges, gauss_kernel, padding='SAME', strides=[1, 1, 1, 1])
def edges_dice_loss(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    weights = K.flatten(weights_mask(y_true))
    intersection = K.sum(y_true_f * y_pred_f)
    eq = tf.equal(y_true_f, y_pred_f)
    eq = tf.to_float(eq)
    weightedEdges = K.sum(eq * weights)
    return (2. * intersection + weightedEdges) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.sum(weights))
def bce_edges(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + (1 - edges_dice_loss(y_true, y_pred))
# weight: weighted tensor(same shape with mask image)
def weighted_bce_loss(y_true, y_pred, weight):
    # avoiding overflow
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))
    # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
    loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
        (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight)
def weighted_dice_loss(y_true, y_pred, weight):
    smooth = 1.
    w, m1, m2 = weight * weight, y_true, y_pred
    intersection = (m1 * m2)
    score = (2. * K.sum(w * intersection) + smooth) / (K.sum(w * m1) + K.sum(w * m2) + smooth)
    loss = 1. - K.sum(score)
    return loss
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    averaged_mask = K.pool2d(
        y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + \
        weighted_dice_loss(y_true, y_pred, weight)...
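
Here `log_kernel` is not an SVGD kernel but a fixed 5x5 Laplacian-of-Gaussian (LoG) filter: `weights_mask` convolves the ground-truth mask with it to locate object borders, thresholds the response, and blurs the result with the normalized Gaussian from `gauss2D` so that boundary pixels get extra weight in `edges_dice_loss`. A minimal sketch of that first convolution step, reusing the `log` array defined above; the snippet itself targets TF 1.x (`tf.to_float` was removed in TF 2), so this sketch assumes TF 2.x eager mode with `tf.cast`, and the mask values are made up for illustration:

import numpy as np
import tensorflow as tf

mask = np.zeros((1, 8, 8, 1), dtype=np.float32)   # one 8x8 binary mask (NHWC)
mask[0, 2:6, 2:6, 0] = 1.0                        # a 4x4 square "object"
log_kernel = tf.reshape(tf.convert_to_tensor(log), [5, 5, 1, 1])
edges = tf.nn.conv2d(mask, log_kernel, strides=[1, 1, 1, 1], padding='SAME')
border = tf.cast(edges > 0.95, tf.float32)        # same threshold as weights_mask
print(border.numpy().squeeze())                   # 1s appear just outside the square's edges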

generate_log.py

Source: generate_log.py (GitHub)

import math
import numpy as np
# Function for calculating the Laplacian of the Gaussian at a given point for a given standard deviation sigma
def log(x, y, sigma):
    numerator = ( (y**2)+(x**2)-2*(sigma**2) )
    denominator = ( (2*math.pi*(sigma**6) ))
    exponential = math.exp(-((x**2)+(y**2))/(2*(sigma**2)))
    return numerator*exponential/denominator
def generate_log(sigma):
    size = max(1,2*round(sigma*3)+1)
    w = math.ceil(float(size)*float(sigma))
    # If the dimension is an even number, make it odd
    if(w%2 == 0):
        w = w + 1

    log_kernel = []
    w_range = int(math.floor(w/2))
    # include both endpoints so the sampled kernel is odd-sized (w x w) and centered on (0, 0)
    for i in range(-w_range, w_range+1):
        for j in range(-w_range, w_range+1):
            log_kernel.append(log(i,j,sigma))
    log_kernel = np.array(log_kernel)
    log_kernel = np.reshape(log_kernel, (w,w))
    ...
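
As a quick sanity check of the point-wise `log(x, y, sigma)` helper above (with sigma fixed at 1.0 purely for illustration): the value at the origin is -2*sigma^2 / (2*pi*sigma^6) = -1/pi, and the response changes sign on the ring x^2 + y^2 = 2*sigma^2, which is the characteristic Laplacian-of-Gaussian shape that `generate_log` samples onto a grid:

print(log(0.0, 0.0, 1.0))   # approx -0.3183, i.e. -1/pi at the center of the kernel
print(log(1.0, 1.0, 1.0))   # 0.0 exactly: on the zero-crossing ring x^2 + y^2 = 2*sigma^2
print(log(2.0, 0.0, 1.0))   # approx 0.0431: positive outside the ring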

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test through to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

