How to use the safe_getattr method in pytest

Best Python code snippets using pytest

transformer_lm.py

Source: transformer_lm.py (GitHub)


...
    def build_model(cls, args, task):
        """Build a new model instance."""
        if args.decoder_layers_to_keep:
            args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
        if safe_getattr(args, "max_target_positions", None) is None:
            args.max_target_positions = safe_getattr(
                args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
            )
        if args.character_embeddings:
            embed_tokens = CharacterTokenEmbedder(
                task.source_dictionary,
                eval(args.character_filters),
                args.character_embedding_dim,
                args.decoder_embed_dim,
                args.char_embedder_highway_layers,
            )
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(
                len(task.source_dictionary),
                task.source_dictionary.pad(),
                args.decoder_input_dim,
                args.adaptive_input_factor,
                args.decoder_embed_dim,
                options.eval_str_list(args.adaptive_input_cutoff, type=int),
                args.quant_noise_pq,
                args.quant_noise_pq_block_size,
            )
        else:
            embed_tokens = cls.build_embedding(
                args, task.source_dictionary, args.decoder_input_dim
            )
        if args.tie_adaptive_weights:
            assert args.adaptive_input
            assert args.adaptive_input_factor == args.adaptive_softmax_factor
            assert (
                args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
            ), "{} != {}".format(
                args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
            )
            assert args.decoder_input_dim == args.decoder_output_dim
        decoder = TransformerDecoder(
            args, task.target_dictionary, embed_tokens, no_encoder_attn=True
        )
        return cls(decoder)

    @classmethod
    def build_embedding(cls, args, dictionary, embed_dim, path=None):
        embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
        return embed_tokens


def base_lm_architecture(args):
    # backward compatibility for older model checkpoints
    if safe_hasattr(args, "no_tie_adaptive_proj"):
        # previous models defined --no-tie-adaptive-proj, so use the existence of
        # that option to determine if this is an "old" model checkpoint
        args.no_decoder_final_norm = True  # old models always set this to True
        if args.no_tie_adaptive_proj is False:
            args.tie_adaptive_proj = True
    if safe_hasattr(args, "decoder_final_norm"):
        args.no_decoder_final_norm = not args.decoder_final_norm
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8)
    args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0)
    args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4)
    args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False)
    args.activation_fn = safe_getattr(args, "activation_fn", "relu")
    args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0)
    args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None)
    args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0)
    args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8)
    args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0)
    args.base_layers = safe_getattr(args, "base_layers", 0)
    args.base_sublayers = safe_getattr(args, "base_sublayers", 1)
    args.base_shuffle = safe_getattr(args, "base_shuffle", False)
    args.add_bos_token = safe_getattr(args, "add_bos_token", False)
    args.no_token_positional_embeddings = safe_getattr(
        args, "no_token_positional_embeddings", False
    )
    args.share_decoder_input_output_embed = safe_getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.character_embeddings = safe_getattr(args, "character_embeddings", False)
    args.decoder_output_dim = safe_getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    # Model training is not stable without this
    args.decoder_normalize_before = True
    args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False)
    args.adaptive_input = safe_getattr(args, "adaptive_input", False)
    args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4)
    args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None)
    args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False)
    args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False)
    args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False)
    args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
    args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False)
    args.offload_activations = safe_getattr(args, "offload_activations", False)
    if args.offload_activations:
        args.checkpoint_activations = True


@register_model_architecture("transformer_lm", "transformer_lm_big")
def transformer_lm_big(args):
    args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_wiki103")
@register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103")
def transformer_lm_baevski_wiki103(args):
    args.decoder_layers = safe_getattr(args, "decoder_layers", 16)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8)
    args.dropout = safe_getattr(args, "dropout", 0.3)
    args.adaptive_input = safe_getattr(args, "adaptive_input", True)
    args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True)
    args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000")
    args.adaptive_softmax_cutoff = safe_getattr(
        args, "adaptive_softmax_cutoff", "20000,60000"
    )
    args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1)
    args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True)
    args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True)
    transformer_lm_big(args)


@register_model_architecture("transformer_lm", "transformer_lm_gbw")
@register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw")
def transformer_lm_baevski_gbw(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True)
    transformer_lm_big(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt")
def transformer_lm_gpt(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt2_small")
def transformer_lm_gpt2_small(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny")
def transformer_lm_gpt2_tiny(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 2)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium")
def transformer_lm_gpt2_medium(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 36)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt2_big")
def transformer_lm_gpt2_big(args):
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600)
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 48)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    base_lm_architecture(args)


def base_gpt3_architecture(args):
    args.decoder_input_dim = args.decoder_embed_dim
    args.decoder_output_dim = args.decoder_embed_dim
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4)
    # GPT-3 used learned positional embeddings, rather than sinusoidal
    args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True)
    args.dropout = safe_getattr(args, "dropout", 0.0)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    args.share_decoder_input_output_embed = True
    base_lm_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_small")
def transformer_lm_gpt3_small(args):
    # 125M params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium")
def transformer_lm_gpt3_medium(args):
    # 350M params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_large")
def transformer_lm_gpt3_large(args):
    # 760M params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl")
def transformer_lm_gpt3_xl(args):
    # 1.3B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7")
def transformer_lm_gpt3_2_7(args):
    # 2.7B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 32)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7")
def transformer_lm_gpt3_6_7(args):
    # 6.7B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 32)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_13")
def transformer_lm_gpt3_13(args):
    # 13B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 40)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40)
    base_gpt3_architecture(args)


@register_model_architecture("transformer_lm", "transformer_lm_gpt3_175")
def transformer_lm_gpt3_175(args):
    # 175B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 96)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96)
...


model.py

Source: model.py (GitHub)


...
        return self.lm_head(features, masked_tokens)

    def max_positions(self):
        """Maximum output length supported by the encoder."""
        return self.args.max_positions


def safe_getattr(obj, k, default=None):
    from omegaconf import OmegaConf

    if OmegaConf.is_config(obj):
        return obj[k] if k in obj and obj[k] is not None else default
    return getattr(obj, k, default)


@register_model_architecture("roberta", "roberta")
def base_architecture(args):
    args.encoder_layers = safe_getattr(args, "encoder_layers", 12)
    args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768)
    args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072)
    args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12)
    args.dropout = safe_getattr(args, "dropout", 0.1)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
    args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0)
    args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0)
    args.max_source_positions = safe_getattr(args, "max_positions", 512)
    args.no_token_positional_embeddings = safe_getattr(
        args, "no_token_positional_embeddings", False
    )
    # BERT has a few structural differences compared to the original Transformer
    args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True)
    args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True)
    args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", False)
    args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh")
    args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False)
    # Adaptive input config
    args.adaptive_input = safe_getattr(args, "adaptive_input", False)
    # LayerDrop config
    args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0)
    args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None)
    # Quantization noise config
    args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0)
    args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8)
    args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0)
    # R4F config
    args.spectral_norm_classification_head = safe_getattr(
        args, "spectral_norm_classification_head", False
    )


@register_model_architecture("roberta", "roberta_prenorm")
def roberta_prenorm_architecture(args):
    args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
    args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True)
    base_architecture(args)


@register_model_architecture("roberta", "roberta_base")
def roberta_base_architecture(args):
    base_architecture(args)


@register_model_architecture("roberta", "roberta_large")
def roberta_large_architecture(args):
    args.encoder_layers = safe_getattr(args, "encoder_layers", 24)
    args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024)
    args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096)
    args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
    base_architecture(args)


@register_model_architecture("roberta", "xlm")
def xlm_architecture(args):
    args.encoder_layers = safe_getattr(args, "encoder_layers", 16)
    args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280)
    args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4)
    args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
...


__init__.py

Source: __init__.py (GitHub)


...
import raven
from raven_aiohttp import AioHttpTransport
from sanic.handlers import ErrorHandler
from sanic import exceptions as sanic_exceptions


def safe_getattr(request, attr_name, default=None):
    # pylint:disable=bare-except
    try:
        return getattr(request, attr_name, default)
    except:
        return default


class SanicSentryErrorHandler(ErrorHandler):
    DEFAULT_EXCEPTIONS_TO_IGNORE = (sanic_exceptions.NotFound,)

    def __init__(self, dsn, exceptions_to_ignore=None, **sentry_kwargs):
        super(SanicSentryErrorHandler, self).__init__()
        self.exceptions_to_ignore = tuple(exceptions_to_ignore) if exceptions_to_ignore is not None else self.DEFAULT_EXCEPTIONS_TO_IGNORE
        # For sentry_kwargs see
        # https://docs.sentry.io/clients/python/advanced/#client-arguments
        self.sentry_client = raven.Client(dsn, transport=AioHttpTransport, **sentry_kwargs)

    def default(self, request, exception):
        if not isinstance(exception, self.exceptions_to_ignore):
            exc_info = (type(exception), exception, exception.__traceback__)
            extra = self._request_debug_info(request) if request else dict()
            self.sentry_client.captureException(exc_info, extra=extra)
        return super(SanicSentryErrorHandler, self).default(request, exception)

    def _request_debug_info(self, request):
        # pylint:disable=no-self-use
        return dict(
            url=safe_getattr(request, "url"),
            method=safe_getattr(request, "method"),
            headers=safe_getattr(request, "headers"),
            body=safe_getattr(request, "body"),
            query_string=safe_getattr(request, "query_string"),
        )

    def intercept_exception(self, function):
        """
        Decorator for Sanic exception views.
        You should use this decorator only if your exception handler returns its own response.
        If your handler returns `None`, the default exception handler will be called, which
        means Sentry will be called twice.
        Example:
        >> @app.exception([Exception, ])
        >> @sentry_client.intercept_exception
        >> def handle_exception(request, exception):
        >>    pass
        """
        @wraps(function)
...
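Here safe_getattr is deliberately tolerant: if reading a request attribute raises for any reason, the error-reporting path must still succeed, so the helper swallows the exception and returns the default. The pytest sketch below copies that helper inline and uses a hypothetical BrokenRequest stand-in (no Sanic required) to verify both the exploding-attribute and missing-attribute cases.

# test_safe_getattr_request.py -- checks the exception-swallowing behaviour of
# the request-oriented safe_getattr shown above (copied inline).
def safe_getattr(request, attr_name, default=None):
    # pylint:disable=bare-except
    try:
        return getattr(request, attr_name, default)
    except:
        return default


class BrokenRequest:
    """Stand-in object whose attribute access blows up, like a half-built request."""

    @property
    def body(self):
        raise RuntimeError("body not available yet")


def test_exploding_attribute_returns_default():
    assert safe_getattr(BrokenRequest(), "body", default=None) is None


def test_missing_attribute_returns_default():
    assert safe_getattr(BrokenRequest(), "query_string", default="") == ""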


inspect.py

Source: inspect.py (GitHub)


...8"""9def isdescriptor(x):10 """Check if the object is some kind of descriptor."""11 for item in '__get__', '__set__', '__delete__':12 if hasattr(safe_getattr(x, item, None), '__call__'):13 return True14 return False15def safe_getattr(obj, name, *defargs):16 """A getattr() that turns all exceptions into AttributeErrors."""17 try:18 return getattr(obj, name, *defargs)19 except Exception:20 # this is a catch-all for all the weird things that some modules do21 # with attribute access22 if defargs:23 return defargs[0]24 raise AttributeError(name)25def safe_getmembers(object, predicate=None):26 """A version of inspect.getmembers() that uses safe_getattr()."""27 results = []28 for key in dir(object):29 try:30 value = safe_getattr(object, key, None)31 except AttributeError:32 continue33 if not predicate or predicate(value):34 results.append((key, value))35 results.sort()...


Pytest Tutorial

Looking for an in-depth tutorial on pytest? LambdaTest's detailed pytest tutorial covers everything related to pytest, from setting up the framework to automation testing. Delve deeper into pytest by exploring advanced use cases like parallel testing, pytest fixtures, parameterization, executing multiple test cases from a single file, and more.

Chapters

  1. What is pytest
  2. Pytest installation: Want to start pytest from scratch? See how to install and configure pytest for Python automation testing.
  3. Run first test with pytest framework: Follow this step-by-step tutorial to write and run your first pytest script.
  4. Parallel testing with pytest: A hands-on guide to parallel testing with pytest to improve the scalability of your test automation.
  5. Generate pytest reports: Reports make it easier to understand the results of pytest-based test runs. Learn how to generate pytest reports.
  6. Pytest Parameterized tests: Create and run your pytest scripts while avoiding code duplication and increasing test coverage with parameterization (a small parametrized sketch follows this list).
  7. Pytest Fixtures: Check out how to implement pytest fixtures for your end-to-end testing needs.
  8. Execute Multiple Test Cases: Explore different scenarios for running multiple test cases in pytest from a single file.
  9. Stop Test Suite after N Test Failures: See how to stop your test suite after N test failures in pytest using the @pytest.mark.incremental decorator and the --maxfail command-line option.
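To connect the chapters back to the topic of this page, here is a hypothetical parametrized pytest example for a plain safe_getattr wrapper; the helper is written for illustration and is not taken from any of the snippets above.

# test_safe_getattr_parametrized.py -- an illustrative parametrized test.
from types import SimpleNamespace

import pytest


def safe_getattr(obj, name, default=None):
    try:
        return getattr(obj, name, default)
    except Exception:
        return default


@pytest.mark.parametrize(
    "obj, name, default, expected",
    [
        (SimpleNamespace(dropout=0.3), "dropout", 0.1, 0.3),   # value present
        (SimpleNamespace(), "dropout", 0.1, 0.1),              # value missing
        (object(), "anything", "fallback", "fallback"),        # arbitrary object
    ],
)
def test_safe_getattr(obj, name, default, expected):
    assert safe_getattr(obj, name, default) == expected

Running a file like this with pytest --maxfail=1 stops the run after the first failing case, which is the command-line option referred to in chapter 9.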

YouTube

Skim the pytest tutorial playlist below to get started with automation testing using the pytest framework.

https://www.youtube.com/playlist?list=PLZMWkkQEwOPlcGgDmHl8KkXKeLF83XlrP

Run Pytest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
