How to use the set_attr method in autotest

Best Python code snippet using autotest_python

caffe_export_utils.py

Source:caffe_export_utils.py Github

copy

Full Screen

...48 op_type = self.op.type49 if op_type in CaffeOpExporter.onnx_to_caffe:50 op_type = CaffeOpExporter.onnx_to_caffe[op_type]51 return op_type52 def set_attr(self):53 pass54 def parse(self) -> ppl_caffe_pb2.LayerParameter:55 self.set_attr()56 self.layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]57 self.layer.top[:] = [var.name for var in self.op.outputs]58 for var in self.op.parameters:59 blob = ppl_caffe_pb2.BlobProto()60 value = var.value61 value = convert_any_to_numpy(value)62 if var.meta is not None:63 shape = var.meta.shape64 dtype = DataType.to_numpy(var.meta.dtype)65 else:66 shape, dtype = value.shape, value.dtype67 blob.shape.dim.extend(shape)68 blob.data.extend(value.astype(dtype).flat)69 self.layer.blobs.extend([blob])70 return self.layer71@register_class72class Conv(CaffeOpExporter):73 def set_attr(self):74 kernel_h, kernel_w = refine_value(self.op.attributes.get('kernel_shape'))75 stride_h, stride_w = refine_value(self.op.attributes.get('strides', [1, 1]))76 dilations_h, dilations_w = refine_value(self.op.attributes.get('dilations', [1, 1]))77 pads = refine_value(self.op.attributes.get('pads', [0, 0]))78 if len(pads) == 2:79 pad_h, pad_w = pads80 elif len(pads) == 4:81 begin_pad = pads[:2]82 end_pad = pads[2:]83 if begin_pad == end_pad:84 pad_h, pad_w = begin_pad85 else:86 logger.error('Caffe only support begin_pad == end_pad in layer')87 else:88 logger.error(f'Unsupported pads attributes with the length of {len(pads)} in Caffe')89 self.layer.convolution_param.num_output = self.op.parameters[0].value.shape[0]90 self.layer.convolution_param.group = refine_value(self.op.attributes.get('group', 1))91 self.layer.convolution_param.kernel_h = kernel_h92 self.layer.convolution_param.kernel_w = kernel_w93 self.layer.convolution_param.pad_h = pad_h94 self.layer.convolution_param.pad_w = pad_w95 self.layer.convolution_param.stride_h = stride_h96 self.layer.convolution_param.stride_w = stride_w97 self.layer.convolution_param.hole_h = 
dilations_h98 self.layer.convolution_param.hole_w = dilations_w99 if len(self.op.parameters) == 2:100 self.layer.convolution_param.bias_term = True101 else:102 self.layer.convolution_param.bias_term = False103@register_class104class BatchNormalization(CaffeOpExporter):105 def set_attr(self):106 self.layer.bn_param.moving_average = bool(refine_value(self.op.attributes.get('training_mode', 0)))107 self.layer.bn_param.var_eps = refine_value(self.op.attributes.get('epsilon', 1e-05))108 if self.layer.bn_param.moving_average:109 self.layer.bn_param.decay = 1 - refine_value(self.op.attributes.get('momentum', 0.9))110 def parse(self):111 super(BatchNormalization, self).parse()112 channel = len(self.layer.blobs[3].data)113 for i in range(4):114 self.layer.blobs[i].shape.ClearField('dim')115 self.layer.blobs[i].shape.dim.extend([1, channel, 1, 1])116 return self.layer117@register_class118class Relu(CaffeOpExporter):119 pass120@register_class121class PRelu(CaffeOpExporter):122 pass123@register_class124class LeakyRelu(CaffeOpExporter):125 def set_attr(self):126 self.layer.relu_param.negative_slope = refine_value(self.op.attributes.get('alpha', 0.01))127class _Pooling(CaffeOpExporter):128 def set_attr(self):129 kernel_h, kernel_w = refine_value(self.op.attributes.get('kernel_shape'))130 stride_h, stride_w = refine_value(self.op.attributes.get('strides', [1, 1]))131 ceil_mode = refine_value(self.op.attributes.get('ceil_mode', 0))132 pads = refine_value(self.op.attributes.get('pads', [0, 0]))133 if len(pads) == 2:134 pad_h, pad_w = pads135 elif len(pads) == 4:136 begin_pad = pads[:2]137 end_pad = pads[2:]138 if begin_pad == end_pad:139 pad_h, pad_w = begin_pad140 else:141 logger.error('Caffe only support begin_pad == end_pad in layer')142 else:143 logger.error(f'Unsupported pads attributes with the length of {len(pads)} in Caffe')144 self.layer.pooling_param.kernel_h = kernel_h145 self.layer.pooling_param.kernel_w = kernel_w146 self.layer.pooling_param.pad_h = pad_h147 
self.layer.pooling_param.pad_w = pad_w148 self.layer.pooling_param.stride_h = stride_h149 self.layer.pooling_param.stride_w = stride_w150 if ceil_mode == 0:151 # ceil_mode is True by CaffeOpExporter in caffe152 self.layer.pooling_param.ceil_mode = False153@register_class154class GlobalAveragePool(_Pooling):155 def set_attr(self):156 self.layer.pooling_param.global_pooling = True157 self.layer.pooling_param.pool = ppl_caffe_pb2.PoolingParameter.AVE158@register_class159class AveragePool(_Pooling):160 def set_attr(self):161 super(AveragePool, self).set_attr()162 self.layer.pooling_param.global_pooling = False163 self.layer.pooling_param.pool = ppl_caffe_pb2.PoolingParameter.AVE164@register_class165class GlobalMaxPool(_Pooling):166 def set_attr(self):167 self.layer.pooling_param.global_pooling = True168 self.layer.pooling_param.pool = ppl_caffe_pb2.PoolingParameter.MAX169@register_class170class MaxPool(_Pooling):171 def set_attr(self):172 super(MaxPool, self).set_attr()173 self.layer.pooling_param.global_pooling = False174 self.layer.pooling_param.pool = ppl_caffe_pb2.PoolingParameter.MAX175@register_class176class Concat(CaffeOpExporter):177 def set_attr(self):178 self.layer.concat_param.axis = refine_value(self.op.attributes['axis'])179@register_class180class Softmax(CaffeOpExporter):181 def set_attr(self):182 axis = refine_value(self.op.attributes.get('axis', -1))183 if not (axis == -1 or axis == len(self.op.inputs[0].meta.shape) - 1):184 logger.warning(f'Converting to caffe Softmax, the axis={axis}, which is not the last axis. 
'185 'This may result to incorrect caffe model')186 self.layer.softmax_param.axis = axis187@register_class188class Transpose(CaffeOpExporter):189 def set_attr(self):190 perm = refine_value(self.op.attributes['perm'])191 self.layer.transpose_param.dim.extend(perm)192@register_class193class ReduceL2(CaffeOpExporter):194 def set_attr(self):195 self.layer.reducel2_param.axes = refine_value(self.op.attributes.get('axes'))196 self.layer.reducel2_param.keepdims = refine_value(self.op.attributes.get('keepdims', 1))197@register_class198class ReduceMean(CaffeOpExporter):199 def set_attr(self):200 axis = None201 if 'axis' in self.op.attributes:202 axis = self.op.attributes.get('axis')203 elif 'axes' in self.op.attributes:204 axis = self.op.attributes.get('axes')205 if isinstance(axis, list):206 assert len(axis) == 1, (207 'You are trying to dump a RuduceMean op to caffe, '208 f'however caffe support 1 axis only, your mean opeartion has {len(axis)} working axis')209 axis = axis[0]210 self.layer.reduce_param.axis = axis211@register_class212class Div(CaffeOpExporter):213 pass214@register_class215class Mul(CaffeOpExporter):216 # TODO: Can optimize some case to Scale + Reshape217 # (lhs_shape[1] == rhs_shape[1] and all(i == 1 for i in rhs_shape[2:]))218 def set_attr(self):219 self.layer.eltwise_param.operation = ppl_caffe_pb2.EltwiseParameter.PROD220 def parse(self):221 self.set_attr()222 self.layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]223 self.layer.top[:] = [var.name for var in self.op.outputs]224 for var in self.op.parameters:225 value = convert_any_to_numpy(var.value)226 if value.size != 1:227 raise AttributeError(f'Now don\'t support Mul op with initializer in shape {value.shape} convert to caffe')228 # Mul olny has two inputs, thus in this loop means the bottom has only one item229 self.layer.eltwise_param.coeff.append(value.item())230 return self.layer231@register_class232class Add(CaffeOpExporter):233 def set_attr(self):234 
self.layer.eltwise_param.operation = ppl_caffe_pb2.EltwiseParameter.SUM235 # ONNX op only support no coeff add now236 # https://github.com/onnx/onnx/blob/master/docs/Operators.md#add237 self.layer.eltwise_param.coeff[:] = [1.0] * len(self.op.inputs)238 def parse(self):239 parameter_layers, extend_bottom = [], []240 for i, var in enumerate(self.op.parameters):241 param_layer = ppl_caffe_pb2.LayerParameter(type='Parameter', name=self.op.name + '_param_' + str(i))242 param_layer.top[:] = [var.name]243 extend_bottom.append(var.name)244 blob = ppl_caffe_pb2.BlobProto()245 value = convert_any_to_numpy(var.value)246 if var.meta is not None:247 shape = var.meta.shape248 dtype = DataType.to_numpy(var.meta.dtype)249 else:250 shape, dtype = value.shape, value.dtype251 blob.shape.dim.extend(shape)252 blob.data.extend(value.astype(dtype).flat)253 param_layer.blobs.extend([blob])254 shape_param = param_layer.parameter_param255 if len(shape) == 3:256 shape_param.batch, shape_param.m, shape_param.n = shape257 elif len(shape) == 4:258 shape_param.batch, shape_param.channel, shape_param.height, shape_param.width = shape259 else:260 raise AttributeError(f'Cannot convert {self.op.name} to Eltwise op.')261 parameter_layers.append(param_layer)262 super(Add, self).parse()263 if len(extend_bottom) != 0:264 self.layer.bottom.extend(extend_bottom)265 self.layer.eltwise_param.coeff.extend([1.0] * len(extend_bottom))266 return [*parameter_layers, self.layer]267@register_class268class Max(CaffeOpExporter):269 def set_attr(self):270 self.layer.eltwise_param.operation = ppl_caffe_pb2.EltwiseParameter.MAX271@register_class272class Sub(CaffeOpExporter):273 def set_attr(self):274 self.layer.eltwise_param.operation = ppl_caffe_pb2.EltwiseParameter.SUM275 # ONNX op only support no coeff sub now276 # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Sub277 self.layer.eltwise_param.coeff[:] = [1.0, -1.0]278@register_class279class Reshape(CaffeOpExporter):280 def set_attr(self):281 if 
len(self.op.inputs) == 0:282 raise AttributeError(f'{self.op.name} has no inputs. Cannot convert to caffe op. '283 'Please optimize the onnx model.')284 shape = convert_any_to_numpy(self.op.parameters[0].value)285 self.layer.reshape_param.shape.dim.extend(shape)286 def parse(self):287 self.set_attr()288 self.layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]289 self.layer.top[:] = [var.name for var in self.op.outputs]290 return self.layer291@register_class292class Clip(CaffeOpExporter):293 def parse(self):294 self.layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]295 self.layer.top[:] = [var.name for var in self.op.outputs]296 min_val = refine_value(self.op.attributes.get('min'))297 max_val = refine_value(self.op.attributes.get('max'))298 if len(self.op.parameters) == 2:299 min_val = convert_any_to_numpy(self.op.parameters[0].value).item()300 max_val = convert_any_to_numpy(self.op.parameters[1].value).item()301 if min_val == 0.0 and max_val == 6.0:302 self.layer.type = 'ReLU6'303 else:304 self.layer.clip_param.min = min_val305 self.layer.clip_param.max = max_val306 return self.layer307@register_class308class Gemm(CaffeOpExporter):309 def parse(self):310 super(Gemm, self).parse()311 # Whether need to add transpose layer312 transpose_layer = None313 if refine_value(self.op.attributes.get('transA', 0)) != 0:314 A = self.op.inputs[0]315 shape = A.meta.shape316 if len(shape) == 2:317 transpose_layer = ppl_caffe_pb2.LayerParameter(type='Transpose', name=self.op.name + '_transposed')318 transpose_layer.bottom[:] = [A.name]319 transpose_layer.top[:] = [A.name + '_trans']320 transpose_layer.transpose_param.dim[:] = [1, 0]321 # Modify InnerProduct input322 self.layer.bottom[:] = [A.name + '_trans']323 else:324 raise ValueError('Cannot support transposed gemm with non-2D input.')325 if self.op.attributes.get('transB', 0) == 0:326 B = convert_any_to_numpy(self.op.parameters[0].value)327 BT_value = np.transpose(B, [1, 
0])328 self.layer.blobs[0].shape.dim[:] = BT_value.shape329 self.layer.blobs[0].data[:] = BT_value.astype('float32').flat330 self.layer.inner_product_param.num_output = self.layer.blobs[0].shape.dim[0]331 self.layer.inner_product_param.bias_term = True if len(self.op.parameters) == 2 else False332 if transpose_layer is None:333 return self.layer334 else:335 return [transpose_layer, self.layer]336@register_class337class Pad(CaffeOpExporter):338 def set_attr(self):339 mode = refine_value(self.op.attributes.get('mode', 'constant'))340 if mode != 'reflect':341 raise TypeError(f'Unsupport pad mode {mode} in caffe op')342 pads = convert_any_to_numpy(self.op.inputs[1].value) if len(self.op.inputs) > 1 else refine_value(self.op.attributes['pads'])343 if len(pads) == 2:344 pads = [pads[0], 0, pads[1], 0]345 phs, pws, phe, pwe = pads346 assert phs == phe and pws == pwe347 self.layer.pad_param.pad_h = phs348 self.layer.pad_param.pad_w = pws349# TODO: InterP and other cases350@register_class351class Resize(CaffeOpExporter):352 def __init__(self, op):353 self.mode = refine_value(op.attributes.get('mode'))354 self.scales = convert_any_to_numpy(op.inputs[2].value) if len(op.inputs) > 2 else []355 self.sizes = convert_any_to_numpy(op.inputs[-1].value) if len(op.inputs) == 4 else []356 super().__init__(op)357 def set_type(self):358 if self.mode == 'nearest' and len(self.sizes) == 0 and len(self.scales) > 0:359 return 'NNUpsample'360 elif self.mode == 'linear' and len(self.scales) == 0 and len(self.sizes) > 0:361 return 'Interp'362 else:363 raise TypeError(f'Cannot convert {self.op.name} to caffe op')364 def set_attr(self):365 if self.op_type == 'NNUpsample':366 assert len(self.scales) == 4367 valid_flag = (self.scales[0] == 1.0) and (self.scales[1] == 1.0) and (self.scales[2] == self.scales[3])368 if not valid_flag:369 raise AttributeError(f'Cannot convert {self.op.name} to NNUpsample due to different scales')370 self.layer.nn_upsample_param.resize = int(self.scales[2])371 if 
self.op_type == 'Interp':372 self.layer.interp_param.height = int(self.sizes[-2])373 self.layer.interp_param.width = int(self.sizes[-1])374 trans_mode = refine_value(self.op.attributes.get('coordinate_transformation_mode', 'half_pixel'))375 if trans_mode == 'align_corners':376 self.layer.interp_param.align_corners = True377 elif trans_mode == 'half_pixel':378 self.layer.interp_param.align_corners = False379 else:380 raise AttributeError(f'Cannot convert {self.op.name} in {trans_mode} mode to Interp.')381 def parse(self):382 self.set_attr()383 self.layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]384 self.layer.top[:] = [var.name for var in self.op.outputs]385 return self.layer386@register_class387class ConvTranspose(Conv):388 pass389@register_class390class Sigmoid(CaffeOpExporter):391 pass392@register_class393class Slice(CaffeOpExporter):394 def parse(self):395 # assert (len(self.op.inputs) == 1)396 input_shape = self.op.inputs[0].meta.shape397 starts, ends = convert_any_to_numpy(self.op.parameters[0].value), convert_any_to_numpy(self.op.parameters[1].value)398 axes = convert_any_to_numpy(self.op.parameters[2].value) if len(self.op.parameters) >= 3 else [i for i in range(len(input_shape))]399 if len(self.op.parameters) >= 4 and any(convert_any_to_numpy(self.op.parameters[3].value) != 1):400 raise AttributeError('Slice op with steps cannot dump to caffe model')401 layers = []402 self.layer = ppl_caffe_pb2.LayerParameter(type=self.op_type, name=self.op.name)403 for i, (start_point, end_point, axis) in enumerate(list(zip(starts, ends, axes))):404 current_layer = ppl_caffe_pb2.LayerParameter(type=self.op_type, name=self.op.name + '_' + str(i))405 current_layer.slice_param.axis = axis406 current_layer.bottom[:] = [var.name for var in self.op.inputs if not var.is_parameter]407 current_layer.top[:] = [var.name for var in self.op.outputs]408 slice_points = [start_point, end_point]409 if start_point == 0:410 slice_points.remove(start_point)411 
else:412 current_layer.top.insert(0, self.op.outputs[0].name + '_front')413 if end_point == -1 or end_point == input_shape[axis]:414 slice_points.remove(end_point)415 else:416 current_layer.top.append(self.op.outputs[-1].name + '_behind')417 current_layer.slice_param.slice_point.extend(slice_points)418 layers.append(current_layer)419 return layers420@register_class421class Tanh(CaffeOpExporter):422 pass423@register_class424class Pow(CaffeOpExporter):425 def set_attr(self):426 self.layer.power_param.power = refine_value(self.op.attributes.get('power', 1))427 self.layer.power_param.scale = refine_value(self.op.attributes.get('scale', 1))428 self.layer.power_param.shift = refine_value(self.op.attributes.get('shift', 0))429@register_class430class Scale(CaffeOpExporter):431 def set_attr(self):432 self.layer.scale_param.axis = refine_value(self.op.attributes.get('axis', 1))433 self.layer.scale_param.num_axes = refine_value(self.op.attributes.get('num_axes', 1))434 self.layer.scale_param.bias_term = refine_value(self.op.attributes.get('bias_term', False))435@register_class436class ChannelShuffle(CaffeOpExporter):437 def set_attr(self):438 self.layer.channel_shuffle_param.group = refine_value(self.op.attributes.get('group', 1))439@register_class440class InstanceNormalization(CaffeOpExporter):441 def set_attr(self):442 self.layer.instance_norm_param.num_features = refine_value(self.op.attributes.get('num_features'))443 self.layer.instance_norm_param.eps = refine_value(self.op.attributes.get('eps', 1e-5))444 self.layer.instance_norm_param.affine = refine_value(self.op.attributes.get('affine', False))445@register_class446class Parameter(CaffeOpExporter):447 def set_attr(self):448 self.layer.parameter_param.m = refine_value(self.op.attributes.get('m', -1))449 self.layer.parameter_param.n = refine_value(self.op.attributes.get('n', -1))450 self.layer.parameter_param.batch = refine_value(self.op.attributes.get('batch', 1))451 self.layer.parameter_param.channel = 
refine_value(self.op.attributes.get('channel', -1))452 self.layer.parameter_param.height = refine_value(self.op.attributes.get('height', -1))453 self.layer.parameter_param.width = refine_value(self.op.attributes.get('width', -1))454@register_class455class Interp(CaffeOpExporter):456 def set_attr(self):457 if refine_value(self.op.attributes.get('shrink_factor')) != 1:458 self.layer.interp_param.shrink_factor = refine_value(self.op.attributes.get('shrink_factor'))459 if refine_value(self.op.attributes.get('zoom_factor')) != 1:460 self.layer.interp_param.zoom_factor = refine_value(self.op.attributes.get('zoom_factor'))461 if refine_value(self.op.attributes.get('width')) and refine_value(self.op.attributes.get('height')):462 self.layer.interp_param.height = refine_value(self.op.attributes.get('height'))463 self.layer.interp_param.width = refine_value(self.op.attributes.get('width'))464 self.layer.interp_param.pad_beg = refine_value(self.op.attributes.get('pad_beg'))465 self.layer.interp_param.pad_end = refine_value(self.op.attributes.get('pad_end'))466 self.layer.interp_param.align_corners = refine_value(self.op.attributes.get('align_corners'))467@register_class468class Tile(CaffeOpExporter):469 def set_attr(self):470 self.layer.tile_param.axis = refine_value(self.op.attributes.get('axis'))471 self.layer.tile_param.tiles = refine_value(self.op.attributes.get('tiles'))472@register_class473class Flatten(CaffeOpExporter):474 def set_attr(self):475 self.layer.flatten_param.axis = refine_value(self.op.attributes.get('axis', 1))476 self.layer.flatten_param.end_axis = refine_value(self.op.attributes.get('end_axis', -1))477@register_class478class SpaceToDepth(CaffeOpExporter):479 def set_attr(self):480 self.layer.subpixel_down_param.downsample = refine_value(self.op.attributes.get('blocksize', 1))481@register_class482class DepthToSpace(CaffeOpExporter):483 def set_attr(self):484 self.layer.subpixel_up_param.upsample = refine_value(self.op.attributes.get('blocksize', 
1))485@register_class486class CaffeArgMax(CaffeOpExporter):487 def set_attr(self):488 self.layer.argmax_param.out_max_val = refine_value(self.op.attributes.get('out_max_val'))489 self.layer.argmax_param.top_k = refine_value(self.op.attributes.get('top_k'))490 self.layer.argmax_param.axis = refine_value(self.op.attributes.get('axis'))491@register_class492class HardSwish(CaffeOpExporter):493 pass494@register_class495class HardSigmoid(CaffeOpExporter):...

Full Screen

Full Screen

ops.py

Source:ops.py Github

copy

Full Screen

...4from op import OpsParam, OpsRegister5from op_io import *6############################# IO define ##############################7# graph may has mult-inputs, so graph will have multi-input8OpsRegister.Register("Input").set_attr(input_shape=list(),9 max_len = int(),10 max_batch = int(),11 alias="NULL",12 data_type="NULL",13 layout="NCHW")14# graph out , only hold place for edge15OpsRegister.Register("Output").set_attr()16OpsRegister.Register("Split").set_attr(split_num=int())17############################# Basic Op define ##############################18# two input19OpsRegister.Register("Dot").set_attr(axes=list())20# one or two input21# enum type {22# Add,23# Subtract,24# Multiply,25# Avg,26# Max27# }28# note : coeff only used by caffe for "Add"29OpsRegister.Register("Eltwise").set_attr(type="Add",30 coeff=list())31# list input32OpsRegister.Register("Concat").set_attr(axis=int())33# one input34OpsRegister.Register("Exp").set_attr(base=float(),35 scale=float(),36 shift=float())37# one input38# y = log(shift + scale * x)39OpsRegister.Register("Log").set_attr(base=float(),40 scale=float(),41 shift=float())42# one input43# y = (shift + scale * x) ^ power44OpsRegister.Register("Power").set_attr(shift=float(),45 scale=float(),46 power=float())47# one input48OpsRegister.Register("Softmax").set_attr(axis=int())49# applies an activation parameter function to an output50# enum type:51# enum type {52# TanH,53# Sigmoid,54# }55OpsRegister.Register("Activation").set_attr(type="",56 clip_relu_num=float())57# Leaky version of a Rectified Linear Unit ( alpha != 0 ).58# f(x) = alpha * x : x < 059# f(x) = x : x >= 060# Standard ReLU ( alpha = 0 )61# f(x) = 0 * x : x < 062# f(x) = x : x >= 063# note: alpha is fixed value64OpsRegister.Register("ReLU").set_attr(alpha=float())65# Parametric Rectified Linear Unit66# f(x) = alpha * x : x < 067# f(x) = x : x >= 068# note: alpha is learned array with the same shape as x.69# ref: Parametric ReLU described in K. 
He et al, Delving Deep into Rectifiers:70# <<Surpassing Human-Level Performance on ImageNet Classification>>, 2015.71OpsRegister.Register("PReLU").set_attr(channel_shared=bool())72# Exponential Linear Unit.73# f(x) = alpha * (exp(x) - 1.0) : x < 074# f(x) = x : x >= 075OpsRegister.Register("ELU").set_attr(alpha=int())76# dense op parameter77OpsRegister.Register("Dense").set_attr(out_dim=int(),78 axis=int(),79 bias_term=bool())80# dropout parameter81OpsRegister.Register("Dropout").set_attr(ratio=float())82OpsRegister.Register("Flatten").set_attr(start_axis=int(),83 end_axis=int())84# caffe unique layer85OpsRegister.Register("Reshape").set_attr(dims=list(),86 axis=int(),87 num_axes=int(),88 layout='')89# Permutes the dimensions of the input according to a given pattern(list type)90OpsRegister.Register("Permute").set_attr(dims=list())91# Cropping op for cropping data of (1/2/3D) by using axis info92# cropping is the same as tf cropping parameter, which saved as tuple or int.93OpsRegister.Register("Crop").set_attr(cropping=list(),94 axis=int())95# slices an input layer to multiple output layers along a given dimension with given slice indices96OpsRegister.Register("Slice").set_attr(axis=int(),97 slice_point=list(),98 slice_dim=int(),99 num=int(),100 sections=list())101############################# Normalization Op define ##############################102# Batch normalization op103# explanation:104# Normalize the activations of the previous layer at each batch,105# i.e. 
applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.106OpsRegister.Register("BatchNorm").set_attr(momentum=float(),107 epsilon=float())108# caffe need may use scale layer after batchnorm layer which tf/mxnet/keras needn't109OpsRegister.Register("Scale").set_attr(axis=int(),110 num_axes=int(),111 bias_term=bool())112# Local Response Normalization op same as caffe,113# which performs a kind of "lateral inhibition" by normalizing over local input regions114# enum NormRegion {115# ACROSS_CHANNELS116# WITHIN_CHANNEL117# }118OpsRegister.Register("LRN").set_attr(local_size=int(),119 alpha=float(),120 beta=float(),121 norm_region="ACROSS_CHANNELS",122 k=float())123# Mean-Variance Normalization124OpsRegister.Register("MVN").set_attr(normalize_variance=bool(),125 across_channels=bool(),126 epsilon=float())127############################# Pooling (1D/2D/3D) Op define ##############################128# enum type:129# enum method {130# MAX, // [default]131# AVG,132# AVGEXC, average_exclude_padding_value133# STOCHASTIC,134# }135OpsRegister.Register("Pooling").set_attr(pool_size=list(),136 strides=list(),137 padding=list(),138 method="MAX",139 global_pooling=bool(),140 cmp_out_shape_floor_as_conv=False)141# Spatial Pyramid Pooling142# enum type:143# enum method {144# MAX, // [default]145# AVG,146# STOCHASTIC,147# }148OpsRegister.Register("SPP").set_attr(pyramid_height=int(),149 method="MAX",)150############################# Convolution (1D/2D/3D) Op define ##############################151# convolution parameter152OpsRegister.Register("Convolution").set_attr(filter_num=int(),153 kernel_size=list(),154 strides=list(),155 padding=list(),156 dilation_rate=list(),157 group=int(),158 axis=int(),159 bias_term=bool())160# Depthwise separable convolution, commonly called "separable convolution" in tf161OpsRegister.Register("DeSepConvolution").set_attr(filter_num=int(),162 kernel_size=list(),163 strides=list(),164 
padding=list(),165 dilation_rate=list(),166 group=int(),167 axis=int(),168 depth_multiplier=int())169# also called transposed convolution170OpsRegister.Register("Deconvolution").set_attr(filter_num=int(),171 kernel_size=list(),172 strides=list(),173 padding=list(),174 dilation_rate=list(),175 group=int(),176 axis=int(),177 bias_term=bool())178# DeformableConvolution179OpsRegister.Register("DeformConvolution").set_attr(filter_num=int(),180 kernel_size=list(),181 strides=list(),182 padding=list(),183 dilation_rate=list(),184 group=int(),185 axis=int(),186 bias_term=bool())187############################# Rnn Op define ##############################188# Standard RNN (LSTM/GRU)189# enum rnn type:190# enum type {191# TANH, // base192# SIGMOID, // base193# RELU, // base194# LSTM,195# GRU,196# }197OpsRegister.Register("RNN").set_attr(hidden_size=int(),198 input_size=int(),199 bias_term=bool(),200 dropout=float(),201 type="GRU")202############################# embedding Op define ##############################203# embedding layer, input_dim in tf or caffe means the voc num and output_dim means the emb size204OpsRegister.Register("Embedding").set_attr(input_dim=int(),205 output_dim=int(),206 bias_term=bool())207############################# Accuracy Op define ##############################208# NULL209########### Object track and detection (for adu(caffe layer type)) Op define #############210# RPNProposalSSD for SSD and RPN211OpsRegister.Register("RPNProposalSSD").set_attr(**RPNProposalSSD_param())212OpsRegister.Register("RCNNDetOutputWithAttr").set_attr(**detection_output_ssd_param())213OpsRegister.Register("DFMBPSROIAlign").set_attr(**dfmb_psroi_pooling_param())214OpsRegister.Register("RCNNProposal").set_attr(**RPNProposalSSD_param())215OpsRegister.Register("ProposalImgScaleToCamCoords").set_attr(**proposal_img_scale_to_cam_coords_param())216########### VIS Op define 
#############217OpsRegister.Register("Axpy").set_attr()218OpsRegister.Register("PriorBox").set_attr(min_size=list(),219 max_size=list(),220 aspect_ratio=list(),221 fixed_size=list(),222 fixed_ratio=list(),223 density=list(),224 is_flip=bool(),225 is_clip=bool(),226 variance=list(),227 img_h=int(),228 img_w=int(),229 step_h=float(),230 step_w=float(),231 offset=float(),232 order=list())233# enum code_type {234# CORNER,235# CENTER_SIZE,236# CORNER_SIZE,237# }238OpsRegister.Register("DetectionOutput").set_attr(share_location=bool(),239 variance_encode_in_target=bool(),240 class_num=int(),241 background_id=int(),242 keep_top_k=int(),243 code_type="CORNER",244 conf_thresh=float(),245 nms_top_k=int(),246 nms_thresh=float(),247 nms_eta=float())248########### ADU Op define #############249OpsRegister.Register("Argmax").set_attr(out_max_val=bool(),250 top_k=int(),251 axis=int(),252 axis_term=bool())253########### OCR Op define #############254OpsRegister.Register("Im2Sequence").set_attr(paddings=list(),255 strides=list(),256 window_size=list(),257 dilations=list())258OpsRegister.Register("Cast").set_attr(in_type=int(),259 out_type=int())260OpsRegister.Register("Gru").set_attr(is_reverse=bool(),261 gate_activation="sigmoid",262 activation="relu",263 gru_formula="")264OpsRegister.Register("CtcAlign").set_attr(merge_repeated=bool(),265 blank=int())266########### RNN Op define #############267OpsRegister.Register("Embedding").set_attr(word_num=int(),268 emb_dim=int(),269 padding_idx=int())270OpsRegister.Register("SequencePool").set_attr(pooltype="LAST")271OpsRegister.Register("SequenceConv").set_attr(filter_num=int(),272 kernel_size=list(),273 padding_trainable=bool(),274 context_stride=int(),275 context_start=int(),276 context_length=int())277OpsRegister.Register("CrfDecoding").set_attr()278OpsRegister.Register("LSTM").set_attr(candidate_activation="tanh",279 cell_activation="tanh",280 gate_activation="sigmoid",281 is_reverse=bool(),282 use_peepholes=bool(),283 
num_direction=int(),284 dropout_param=float(),285 num_layers=int(),286 input_activation="null")287OpsRegister.Register("LSTMP").set_attr(outDim=int(),288 skipNum=int(),289 reActType='tanh',290 cellDim=int())291OpsRegister.Register("MatMul").set_attr(transpose_x=bool(),292 transpose_y=bool(),293 coeff=float())294OpsRegister.Register("LayerNorm").set_attr(is_across_spatial=bool(),295 is_shared_channel=bool(),296 begin_norm_axis=int(),297 eps=float())298OpsRegister.Register("Resize").set_attr(method="BILINEAR_ALIGN",299 height_scale=float(),300 width_scale=float(),301 out_width=int(),302 out_height=int())303OpsRegister.Register("Normalize").set_attr(begin_norm_axis=int(),304 is_across_spatial=bool(),305 is_shared_channel=bool(),306 eps=float(),307 p=int())308OpsRegister.Register("Pad").set_attr(pad_c=list(),309 pad_h=list(),310 pad_w=list())311OpsRegister.Register("ShuffleChannel").set_attr(group=int())312OpsRegister.Register("RoisAnchorFeature").set_attr(min_anchor_size=float(),313 num_anchor_scales=int(),314 anchor_scale_pow_base=float(),315 anchor_wph_ratios=list(),316 num_top_iou_anchor=int(),317 min_num_top_iou_anchor=int(),318 iou_thr=float(),319 ft_ratio_h=bool(),320 ft_ratio_w=bool(),321 ft_log_ratio_h=bool(),322 ft_log_ratio_w=bool(),323 bbox_size_add_one=bool())324OpsRegister.Register("Interp").set_attr(height=int(),325 width=int(),326 zoom_factor=int(),327 shrink_factor=int(),328 pad_beg=int(),329 pad_end=int())330##################################### reverse_sequence op define ############################ #########331####### it is named BatchReverseSequenceLayer in lego332#333OpsRegister.Register("ReverseSequence").set_attr() ##no prams , no weights.334##################################### reverse op define #####################################335####### it is named BatchReverseInputLayer in lego336OpsRegister.Register("Reverse").set_attr() ## no prams, no weights.337##################################### embedding_lg op define 
################################ #####338####### it is named BatchEmbeddingLayer in lego339OpsRegister.Register("EmbeddingLg").set_attr() ## ???? is it same to Embedding?340##################################### grnn(single-layer, single-direction GRU) op define #### #################################341####### it is named BatchGrnnLayer in lego342OpsRegister.Register("GRNN").set_attr() ## ???? is it same to RNN?343##################################### match_matrix op define ################################ #####344####### it is named BatchMatchMatrixTensorLayer in lego345OpsRegister.Register("MatchMatrix").set_attr(dim_in = int(),346 dim_t = int(),347 linear_term = bool(),348 bias_term = bool(),349 diag_init = int(),350 diag_init_dim_num = int(),351 init_low = int(),352 init_up = int())353##################################### var_size_conv op define ############################### ######354####### it is named BatchVarSizeConvLayer in lego355OpsRegister.Register("VarSizeConv").set_attr() ## it is same to convolution????356##################################### topk_pooling op define ################################ #####357###### it is named BatchTopKPoolingLayer in lego358OpsRegister.Register("TopKPooling").set_attr(top_k = int(),359 feat_map_num = int())360##################################### topk_avg_pooling op define ############################ #########361###### it is named BatchTopKAvgPoolingByRowLayer in lego362OpsRegister.Register("TopKAvgPooling").set_attr(top_ks = list(),363 feat_map_num = int(),364 is_pooling_by_row = bool())365##################################### extract_last op define ################################ #####366###### it is named BatchExtractLastLayer in lego,367OpsRegister.Register("SequencePool").set_attr(pooltype = str()) #no paras, no weights.368#####################################Unpadding_padding op define ############################ #########369###### it is named UnpaddingPaddingLayer in 
lego,370OpsRegister.Register("ConvUnpaddingPadding").set_attr() #no paras, no weights.371# Fast-RCNN372OpsRegister.Register("AffineChannel").set_attr() #no paras, no weights.373OpsRegister.Register("AnchorGenerator").set_attr(anchor_sizes=list(),374 aspect_ratios=list(),375 variances=list(),376 stride=list(),377 offset=float())378OpsRegister.Register("GenerateProposals").set_attr(pre_nms_top_n=int(),379 post_nms_top_n=int(),380 nms_thresh=float(),381 min_size=float(),382 eta=float())383OpsRegister.Register("RoiAlign").set_attr(spatial_scale=float(),384 pooled_height=int(),385 pooled_width=int(),386 sampling_ratio=int())387OpsRegister.Register("RoiPool").set_attr(spatial_scale=float(),388 pooled_height=int(),389 pooled_width=int())390##################################### pytorch edsr model PixelShuffle op define ################################391# PixelShuffle in_shape = [n, r * r * c, h, w] scale_factor = r ==> out_shape = [n, c, r * h, r * w]392OpsRegister.Register("PixelShuffle").set_attr(scale_factor=int())393OpsRegister.Register("Coord2Patch").set_attr(img_h=int(),394 output_h=int(),395 output_w=int())396OpsRegister.Register("DataNorm").set_attr(epsilon=float())397OpsRegister.Register("Pad2D").set_attr(mode="constant",398 value=float(),399 pad_h=list(),400 pad_w=list())401OpsRegister.Register("SequencePoolConcat").set_attr(pooltype=str(),402 slot_num=int(),403 axis=int())404OpsRegister.Register("SRoiAlign").set_attr(pooled_h=int(),405 pooled_w=int(),406 spatial_scale=float())407OpsRegister.Register("SProposal").set_attr(feat_stride=int(),408 basesize=int(),409 boxminsize=int(),410 pre_nms_topn=int(),411 post_nms_topn=int(),412 nms_thresh=float(),413 scale=list(),...

Full Screen

Full Screen

vmmanagertest.py

Source:vmmanagertest.py Github

copy

Full Screen

...5class VMManagerTest(testbase.ClustoTestBase):6 def data(self):7 vmm = VMManager('vmm')8 s1 = BasicServer('s1')9 s1.set_attr('system', subkey='memory', value=1000)10 s1.set_attr('system', subkey='disk', value=5000)11 s1.set_attr('system', subkey='cpucount', value=2)12 13 s2 = BasicServer('s2')14 s2.set_attr('system', subkey='memory', value=16000)15 s2.set_attr('system', subkey='disk', value=2500)16 s2.set_attr('system', subkey='cpucount', value=2)17 18 vmm.insert(s1)19 vmm.insert(s2)20 21 def testVMManagerAllocate(self):22 s1 = clusto.get_by_name('s1')23 s2 = clusto.get_by_name('s2')24 25 vs1 = BasicVirtualServer('vs1')26 vs1.set_attr('system', subkey='memory', value=1000)27 vs1.set_attr('system', subkey='disk', value=50)28 vs1.set_attr('system', subkey='cpucount', value=1)29 vs2 = BasicVirtualServer('vs2')30 vs2.set_attr('system', subkey='memory', value=8000)31 vs2.set_attr('system', subkey='disk', value=1000)32 vs2.set_attr('system', subkey='cpucount', value=1)33 vs3 = BasicVirtualServer('vs3')34 vs3.set_attr('system', subkey='memory', value=800)35 vs3.set_attr('system', subkey='disk', value=100)36 vs3.set_attr('system', subkey='cpucount', value=3)37 vmm = clusto.get_by_name('vmm')38 vmm.allocate(vs1)39 self.assertEqual(len(vmm.resources(vs1)), 1)40 self.assert_(vmm.resources(vs1)[0].value in [s1, s2])41 vmm.allocate(vs2)42 self.assertEqual([r.value for r in vmm.resources(vs2)], [s2])43 self.assertRaises(ResourceException, vmm.allocate, vs3)44 def testVMDestroy(self):45 vmm = clusto.get_by_name('vmm')46 vs1 = BasicVirtualServer('vs1')47 vs1.set_attr('system', subkey='memory', value=1000)48 vs1.set_attr('system', subkey='disk', value=50)49 vs1.set_attr('system', subkey='cpucount', value=2)50 vs2 = BasicVirtualServer('vs2')51 vs2.set_attr('system', subkey='memory', value=5000)52 vs2.set_attr('system', subkey='disk', value=50)53 vs2.set_attr('system', subkey='cpucount', value=2)54 vs3 = BasicVirtualServer('vs3')55 vs3.set_attr('system', subkey='memory', 
value=1000)56 vs3.set_attr('system', subkey='disk', value=50)57 vs3.set_attr('system', subkey='cpucount', value=1)58 s1 = clusto.get_by_name('s1')59 s2 = clusto.get_by_name('s2')60 61 vmm.allocate(vs1)62 vmm.allocate(vs2)63 self.assertRaises(ResourceException, vmm.allocate, vs3)64 vmm.deallocate(vs2)65 vmm.allocate(vs3)66 self.assertEqual([r.value for r in vmm.resources(vs3)],67 [clusto.get_by_name('s2')])68 69 def testVMAllocateToSpecificHost(self):70 vs1 = BasicVirtualServer('vs1')71 vs1.set_attr('system', subkey='memory', value=1000)72 vs1.set_attr('system', subkey='disk', value=50)73 vs1.set_attr('system', subkey='cpucount', value=2)74 vs2 = BasicVirtualServer('vs2')75 vs2.set_attr('system', subkey='memory', value=5000)76 vs2.set_attr('system', subkey='disk', value=50)77 vs2.set_attr('system', subkey='cpucount', value=2)78 vs3 = BasicVirtualServer('vs3')79 vs3.set_attr('system', subkey='memory', value=1000)80 vs3.set_attr('system', subkey='disk', value=50)81 vs3.set_attr('system', subkey='cpucount', value=1)82 s1 = clusto.get_by_name('s1')83 s2 = clusto.get_by_name('s2')84 s3 = BasicServer('s3')85 86 vmm = clusto.get_by_name('vmm')87 vmm.allocate(vs1, s1)88 self.assertRaises(ResourceException, vmm.allocate, vs2, s3)89 self.assertRaises(ResourceException, vmm.allocate, vs1, s1)90 self.assertRaises(ResourceException, vmm.allocate, vs1, s2)91 self.assertEqual([r.value for r in vmm.resources(vs1)],92 [clusto.get_by_name('s1')])93 self.assertRaises(ResourceException, vmm.allocate, vs2, s1)94 self.assertEqual([r.value for r in vmm.resources(vs2)],95 [])96 vmm.allocate(vs2, s1, force=True)97 self.assertEqual([r.value for r in vmm.resources(vs2)],98 [clusto.get_by_name('s1')])99 def testAddingAndRemovingHosts(self):100 s1 = clusto.get_by_name('s1')101 s2 = clusto.get_by_name('s2')102 s3 = BasicServer('s3')103 s3.set_attr('system', subkey='memory', value=16000)104 s3.set_attr('system', subkey='disk', value=2500)105 s3.set_attr('system', subkey='cpucount', value=2)106 
107 vmm = clusto.get_by_name('vmm')108 vs1 = BasicVirtualServer('vs1')109 vs1.set_attr('system', subkey='memory', value=1000)110 vs1.set_attr('system', subkey='disk', value=50)111 vs1.set_attr('system', subkey='cpucount', value=2)112 self.assertRaises(ResourceException, vmm.allocate, vs1, s3)113 vmm.allocate(vs1, s1)114 self.assertRaises(ResourceException, vmm.remove, s1)115 vmm.deallocate(vs1)116 vmm.remove(s1)117 vmm.insert(s3)118 vmm.allocate(vs1, s3)119 def testReservingResource(self):120 s1 = clusto.get_by_name('s1')121 s2 = clusto.get_by_name('s2')122 vmm = clusto.get_by_name('vmm')123 vs1 = BasicVirtualServer('vs1')124 vs1.set_attr('system', subkey='memory', value=1000)125 vs1.set_attr('system', subkey='disk', value=50)126 vs1.set_attr('system', subkey='cpucount', value=2)127 vmm.allocate(vmm, s1)128 self.assertRaises(ResourceException, vmm.allocate, vs1, s1)129 130class EC2VMManagerTest(testbase.ClustoTestBase):131 def data(self):132 vmm = clusto.drivers.EC2VMManager('ec2man')...

Full Screen

Full Screen

ops_fluid.py

Source:ops_fluid.py Github

copy

Full Screen

2# Copyright (c) 2017, Cuichaowen. All rights reserved.3# -*- coding: utf-8 -*-4from op import OpsParam, OpsRegister5from op_io import *6OpsRegister.Register("elementwise_mul").set_attr()7OpsRegister.Register("depthwise_conv2d").set_attr()8OpsRegister.Register("transpose").set_attr()9OpsRegister.Register("reshape").set_attr()10OpsRegister.Register("concat").set_attr()11OpsRegister.Register("box_coder").set_attr()12OpsRegister.Register("im2sequence").set_attr()13OpsRegister.Register("sum").set_attr()14OpsRegister.Register("top_k").set_attr()15OpsRegister.Register("ctc_align").set_attr()16OpsRegister.Register("cast").set_attr()17OpsRegister.Register("elementwise_add_fulid").set_attr()18OpsRegister.Register("lookup_table").set_attr()19OpsRegister.Register("lstm").set_attr()20OpsRegister.Register("sequence_pool").set_attr()21OpsRegister.Register("tanh").set_attr()22OpsRegister.Register("sequence_conv").set_attr()23OpsRegister.Register("stanh").set_attr()24OpsRegister.Register("matmul").set_attr()25OpsRegister.Register("layer_norm").set_attr()26OpsRegister.Register("dropout").set_attr()27OpsRegister.Register("scale").set_attr()28OpsRegister.Register("norm").set_attr()29OpsRegister.Register("lod_reset").set_attr()30OpsRegister.Register("fill_constant").set_attr()31OpsRegister.Register("lod_rank_table").set_attr()32OpsRegister.Register("max_sequence_len").set_attr()33OpsRegister.Register("less_than").set_attr()34OpsRegister.Register("lod_tensor_to_array").set_attr()35OpsRegister.Register("write_to_array").set_attr()36OpsRegister.Register("reorder_lod_tensor_by_rank").set_attr()37OpsRegister.Register("while").set_attr()38OpsRegister.Register("array_to_lod_tensor").set_attr()39OpsRegister.Register("assign").set_attr()40OpsRegister.Register("assign_value").set_attr()41OpsRegister.Register("shape").set_attr()42OpsRegister.Register("fake_quantize_abs_max").set_attr()43OpsRegister.Register("fake_dequantize_max_abs").set_attr()44OpsRegister.Register("fake_quantize_range_abs_max")
.set_attr()45OpsRegister.Register("fake_dequantize_range_max_abs").set_attr()46OpsRegister.Register("increment").set_attr()47OpsRegister.Register("fusion_dropout_add_ln_quant").set_attr()48OpsRegister.Register("dequantize_max_abs_rowwise").set_attr()49OpsRegister.Register("quantize_abs_max_rowwise").set_attr()50OpsRegister.Register("fusion_add_relu_dropout_quant").set_attr()51OpsRegister.Register("fill_constant_batch_size_like").set_attr()52OpsRegister.Register("beam_search_decode").set_attr()53OpsRegister.Register('reduce').set_attr(54 reduce_type=str(),55 keep_dim=bool(),56 reduce_dim=list(),57 reduce_all=bool(),58 coeff=float(),59)60OpsRegister.Register('arg_max').set_attr(61 out_max_val=bool(),62 top_k=int(),63 axis=int(),64)65OpsRegister.Register('sequence_expand').set_attr(66 ref_level=int(),67)68OpsRegister.Register('eltwise').set_attr(69 type=str(),70 coeff=float(),71)72OpsRegister.Register('cast').set_attr(73 int_type=int(),74 out_type=int(),75)76OpsRegister.Register('yolo_box').set_attr(77 anchors=list(),78 class_num=int(),79 conf_thresh=float(),80 downsample_ratio=int(),81)82OpsRegister.Register('slice').set_attr(83 slice_dim=int(),84 slice_point=list(),85 axis=int(),86)87OpsRegister.Register('box_coder').set_attr(88 axis=int(),89 box_normalized=bool(),90 variance=list(),91)92OpsRegister.Register('GroupNormal').set_attr(93 has_scale=bool(),94 has_bias=bool(),95 eps=float(),96 group=int(),97)98OpsRegister.Register('slice_v2').set_attr(99 starts=list(),100 ends=list(),101 axes=list(),102)103OpsRegister.Register('arithmetic').set_attr(104 op_type=int(),105)106OpsRegister.Register('aligned_mat_mul').set_attr(107 is_transpose_X=bool(),108 is_transpose_Y=bool(),109 scale=float(),110)111OpsRegister.Register('attention_padding_mask').set_attr(112 mask=float(),113 pad_id=int(),114)115OpsRegister.Register('topk_avg_pooling').set_attr(116 top_ks=list(),117 feat_map_num=int(),118 is_pooling_by_row=bool(),119)120OpsRegister.Register('Dense').set_attr(121 
axis=int(),122 out_dim=int(),123 bias_term=bool(),124)125OpsRegister.Register('MatchMatrix').set_attr(126 dim_in=int(),127 dim_t=int(),128 linear_term=bool(),129 bias_term=bool(),130 is_l_same=bool(),...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful