How to use the test_types method in avocado

Best Python code snippets using avocado_python

perf_data_generator.py

Source: perf_data_generator.py (GitHub)


```python
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=too-many-lines
"""Generates chromium.perf{,.fyi}.json from a set of condensed configs.
This file contains condensed configurations for the perf bots along with
logic to inflate those into the full (unwieldy) configurations in
//testing/buildbot that are consumed by the chromium recipe code.
"""
import argparse
import collections
import csv
import filecmp
import json
import os
import sys
import tempfile
import textwrap

from core import benchmark_finders
from core import benchmark_utils
from core import bot_platforms
from core import path_util
from core import undocumented_benchmarks as ub_module

path_util.AddTelemetryToPath()
from telemetry import decorators

# The condensed configurations below get inflated into the perf builder
# configurations in //testing/buildbot. The expected format of these is:
#
# {
#   'builder_name1': {
#     # Targets that the builder should compile in addition to those
#     # required for tests, as a list of strings.
#     'additional_compile_targets': ['target1', 'target2', ...],
#
#     'tests': [
#       {
#         # Arguments to pass to the test suite as a list of strings.
#         'extra_args': ['--arg1', '--arg2', ...],
#
#         # Name of the isolate to run as a string.
#         'isolate': 'isolate_name',
#
#         # Name of the test suite as a string.
#         # If not present, will default to `isolate`.
#         'name': 'presentation_name',
#
#         # The number of shards for this test as an int.
#         'num_shards': 2,
#
#         # What kind of test this is; for options, see TEST_TYPES
#         # below. Defaults to TELEMETRY.
#         'type': TEST_TYPES.TELEMETRY,
#       },
#       ...
#     ],
#
#     # Testing platform, as a string. Used in determining the browser
#     # argument to pass to telemetry.
#     'platform': 'platform_name',
#
#     # Dimensions to pass to swarming, as a dict of string keys & values.
#     'dimension': {
#       'dimension1_name': 'dimension1_value',
#       ...
#     },
#   },
#   ...
# }

class TEST_TYPES(object):
  GENERIC = 0
  GTEST = 1
  TELEMETRY = 2
  ALL = (GENERIC, GTEST, TELEMETRY)

# TODO(crbug.com/902089): automatically generate --test-shard-map-filename
# arguments once we track all the perf FYI builders to core/bot_platforms.py
FYI_BUILDERS = {
  'android-nexus5x-perf-fyi': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'extra_args': [
          '--output-format=histograms',
          '--test-shard-map-filename=android-nexus5x-perf-fyi_map.json',
        ],
        'num_shards': 3
      }
    ],
    'platform': 'android-chrome',
    'dimension': {
      'pool': 'chrome.tests.perf-fyi',
      'os': 'Android',
      'device_type': 'bullhead',
      'device_os': 'MMB29Q',
      'device_os_flavor': 'google',
    },
  },
  'android-pixel2_webview-perf': {
    'tests': [
      {
        'isolate': 'performance_webview_test_suite',
        'extra_args': [
          '--test-shard-map-filename=android-pixel2_webview-perf_map.json',
        ],
        'num_shards': 7
      }
    ],
    'platform': 'android-webview-google',
    'dimension': {
      'pool': 'chrome.tests.perf-webview-fyi',
      'os': 'Android',
      'device_type': 'walleye',
      'device_os': 'O',
      'device_os_flavor': 'google',
    },
  },
  'android-pixel2-perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'extra_args': [
          # TODO(crbug.com/612455): Enable ref builds once can pass both
          # --browser=exact (used by this bot to have it run Monochrome6432)
          # and --browser=reference together.
          #'--run-ref-build',
          '--test-shard-map-filename=android-pixel2-perf_map.json',
        ],
        'num_shards': 7
      }
    ],
    'platform': 'android-chrome',
    'browser': 'bin/monochrome_64_32_bundle',
    'dimension': {
      'pool': 'chrome.tests.perf-fyi',
      'os': 'Android',
      'device_type': 'walleye',
      'device_os': 'O',
      'device_os_flavor': 'google',
    },
  },
  'linux-perf-fyi': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'extra_args': [
          '--benchmarks=%s' % ','.join((
            'blink_perf.layout_ng',
            'blink_perf.paint_layout_ng',
            'loading.desktop_layout_ng',
          )),
          '--output-format=histograms',
        ],
        'name': 'blink_perf.layout_ng',
      }
    ],
    'platform': 'linux',
    'dimension': {
      'gpu': '10de',
      'id': 'build186-b7',
      'pool': 'chrome.tests.perf-fyi',
    },
  },
}

# These configurations are taken from chromium_perf.py in
# build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync
# to generate the correct json for each tester
#
# On desktop builders, chromedriver is added as an additional compile target.
# The perf waterfall builds this target for each commit, and the resulting
# ChromeDriver is archived together with Chrome for use in bisecting.
# This can be used by Chrome test team, as well as by google3 teams for
# bisecting Chrome builds with their web tests. For questions or to report
# issues, please contact johnchen@chromium.org.
BUILDERS = {
  'android-builder-perf': {
    'additional_compile_targets': [
      'microdump_stackwalk', 'angle_perftests', 'chrome_apk'
    ],
    'tests': [
      {
        'name': 'resource_sizes_chrome_public_apk',
        'isolate': 'resource_sizes_chrome_public_apk',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_monochrome_public_minimal_apks',
        'isolate': 'resource_sizes_monochrome_public_minimal_apks',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_chrome_modern_public_minimal_apks',
        'isolate': 'resource_sizes_chrome_modern_public_minimal_apks',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_system_webview_apk',
        'isolate': 'resource_sizes_system_webview_apk',
        'type': TEST_TYPES.GENERIC,
      },
    ],
    'dimension': {
      'os': 'Ubuntu-14.04',
      'pool': 'chrome.tests',
    },
    'perf_trigger': False,
  },
  'android_arm64-builder-perf': {
    'additional_compile_targets': [
      'microdump_stackwalk', 'angle_perftests', 'chrome_apk'
    ],
    'tests': [
      {
        'name': 'resource_sizes_chrome_public_apk',
        'isolate': 'resource_sizes_chrome_public_apk',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_monochrome_public_minimal_apks',
        'isolate': 'resource_sizes_monochrome_public_minimal_apks',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_chrome_modern_public_minimal_apks',
        'isolate': 'resource_sizes_chrome_modern_public_minimal_apks',
        'type': TEST_TYPES.GENERIC,
      },
      {
        'name': 'resource_sizes_system_webview_apk',
        'isolate': 'resource_sizes_system_webview_apk',
        'type': TEST_TYPES.GENERIC,
      },
    ],
    'dimension': {
      'os': 'Ubuntu-14.04',
      'pool': 'chrome.tests',
    },
    'perf_trigger': False,
  },
  'linux-builder-perf': {
    'additional_compile_targets': ['chromedriver'],
  },
  'mac-builder-perf': {
    'additional_compile_targets': ['chromedriver'],
  },
  'win32-builder-perf': {
    'additional_compile_targets': ['chromedriver'],
  },
  'win64-builder-perf': {
    'additional_compile_targets': ['chromedriver'],
  },
  'android-go-perf': {
    'tests': [
      {
        'name': 'performance_test_suite',
        'isolate': 'performance_test_suite',
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=android-go-perf_map.json',
        ],
        'num_shards': 19
      }
    ],
    'platform': 'android-chrome',
    'dimension': {
      'device_os': 'O',
      'device_type': 'gobo',
      'device_os_flavor': 'google',
      'pool': 'chrome.tests.perf',
      'os': 'Android',
    },
  },
  'android-go_webview-perf': {
    'tests': [
      {
        'isolate': 'performance_webview_test_suite',
        'extra_args': [
          '--test-shard-map-filename=android-go_webview-perf_map.json',
        ],
        'num_shards': 25
      }
    ],
    'platform': 'android-webview-google',
    'dimension': {
      'pool': 'chrome.tests.perf-webview',
      'os': 'Android',
      'device_type': 'gobo',
      'device_os': 'O',
      'device_os_flavor': 'google',
    },
  },
  'android-nexus5x-perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 16,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=android-nexus5x-perf_map.json',
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'components_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'tracing_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'gpu_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'angle_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
        'extra_args': [
          '--shard-timeout=300'
        ],
      },
      {
        'isolate': 'base_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'android',
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Android',
      'device_type': 'bullhead',
      'device_os': 'MMB29Q',
      'device_os_flavor': 'google',
    },
  },
  'Android Nexus5 Perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 16,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=android_nexus5_perf_map.json',
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'tracing_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'components_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'gpu_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
    ],
    'platform': 'android',
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Android',
      'device_type': 'hammerhead',
      'device_os': 'KOT49H',
      'device_os_flavor': 'google',
    },
  },
  'Android Nexus5X WebView Perf': {
    'tests': [
      {
        'isolate': 'performance_webview_test_suite',
        'num_shards': 16,
        'extra_args': [
          '--test-shard-map-filename=android_nexus5x_webview_perf_map.json',
          '--assert-gpu-compositing',
        ],
      }
    ],
    'platform': 'android-webview',
    'dimension': {
      'pool': 'chrome.tests.perf-webview',
      'os': 'Android',
      'device_type': 'bullhead',
      'device_os': 'MOB30K',
      'device_os_flavor': 'aosp',
    },
  },
  'Android Nexus6 WebView Perf': {
    'tests': [
      {
        'isolate': 'performance_webview_test_suite',
        'num_shards': 12,
        'extra_args': [
          '--test-shard-map-filename=android_nexus6_webview_perf_map.json',
          '--assert-gpu-compositing',
        ],
      }
    ],
    'platform': 'android-webview',
    'dimension': {
      'pool': 'chrome.tests.perf-webview',
      'os': 'Android',
      'device_type': 'shamu',
      'device_os': 'MOB30K',
      'device_os_flavor': 'aosp',
    },
  },
  'win-10-perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 26,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=win-10-perf_map.json',
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'angle_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
        'extra_args': [
          '--shard-timeout=300'
        ],
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'components_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'views_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'base_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'win',
    'target_bits': 64,
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Windows-10',
      'gpu': '8086:5912'
    },
  },
  'Win 7 Perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 5,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=win_7_perf_map.json',
        ],
      },
      {
        'isolate': 'load_library_perf_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'components_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'win',
    'target_bits': 32,
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Windows-2008ServerR2-SP1',
      'gpu': '102b:0532'
    },
  },
  'Win 7 Nvidia GPU Perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 5,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=win_7_nvidia_gpu_perf_map.json',
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'load_library_perf_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'angle_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'name': 'passthrough_command_buffer_perftests',
        'isolate': 'command_buffer_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
        'extra_args': [
          '--use-cmd-decoder=passthrough',
          '--use-angle=gl-null',
        ],
      },
      {
        'name': 'validating_command_buffer_perftests',
        'isolate': 'command_buffer_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
        'extra_args': [
          '--use-cmd-decoder=validating',
          '--use-stub',
        ],
      },
    ],
    'platform': 'win',
    'target_bits': 64,
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Windows-2008ServerR2-SP1',
      'gpu': '10de:1cb3'
    },
  },
  'mac-10_12_laptop_low_end-perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'num_shards': 26,
        'extra_args': [
          '--run-ref-build',
          ('--test-shard-map-filename='
           'mac-10_12_laptop_low_end-perf_map.json'),
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'performance_browser_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'load_library_perf_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'mac',
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Mac-10.12',
      'gpu': '8086:1626'
    },
  },
  'linux-perf': {
    'tests': [
      # Add views_perftests, crbug.com/811766
      {
        'isolate': 'performance_test_suite',
        'num_shards': 26,
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=linux-perf_map.json',
          '--assert-gpu-compositing',
        ],
      },
      {
        'isolate': 'performance_browser_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'load_library_perf_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'net_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'tracing_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'base_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'linux',
    'dimension': {
      'gpu': '10de:1cb3',
      'os': 'Ubuntu-14.04',
      'pool': 'chrome.tests.perf',
    },
  },
  'mac-10_13_laptop_high_end-perf': {
    'tests': [
      {
        'isolate': 'performance_test_suite',
        'extra_args': [
          '--run-ref-build',
          '--test-shard-map-filename=mac-10_13_laptop_high_end-perf_map.json',
          '--assert-gpu-compositing',
        ],
        'num_shards': 26
      },
      {
        'isolate': 'performance_browser_tests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'net_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'views_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'media_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      },
      {
        'isolate': 'base_perftests',
        'num_shards': 1,
        'type': TEST_TYPES.GTEST,
      }
    ],
    'platform': 'mac',
    'dimension': {
      'pool': 'chrome.tests.perf',
      'os': 'Mac-10.13',
      'gpu': '1002:6821'
    },
  },
}

def update_all_tests(builders_dict, file_path):
  tests = {}
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See //tools/perf/generate_perf_data to make changes'] = {}
  for name, config in builders_dict.iteritems():
    tests[name] = generate_builder_config(config)
  with open(file_path, 'w') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')

def merge_dicts(*dict_args):
  result = {}
  for dictionary in dict_args:
    result.update(dictionary)
  return result

class BenchmarkMetadata(object):
  def __init__(self, emails, component='', documentation_url='', tags=''):
    self.emails = emails
    self.component = component
    self.documentation_url = documentation_url
    self.tags = tags

GTEST_BENCHMARKS = {
  'angle_perftests': BenchmarkMetadata(
      'jmadill@chromium.org, chrome-gpu-perf-owners@chromium.org',
      'Internals>GPU>ANGLE'),
  'base_perftests': BenchmarkMetadata(
      'skyostil@chromium.org, gab@chromium.org',
      'Internals>SequenceManager',
      ('https://chromium.googlesource.com/chromium/src/+/HEAD/base/' +
       'README.md#performance-testing')),
  'validating_command_buffer_perftests': BenchmarkMetadata(
      'piman@chromium.org, chrome-gpu-perf-owners@chromium.org',
      'Internals>GPU'),
  'passthrough_command_buffer_perftests': BenchmarkMetadata(
      'net-dev@chromium.org',
      'Internals>Network'),
  'net_perftests': BenchmarkMetadata(
      'net-dev@chromium.org',
      'Internals>Network'),
  'gpu_perftests': BenchmarkMetadata(
      'reveman@chromium.org, chrome-gpu-perf-owners@chromium.org',
      'Internals>GPU'),
  'tracing_perftests': BenchmarkMetadata(
      'kkraynov@chromium.org, primiano@chromium.org'),
  'load_library_perf_tests': BenchmarkMetadata(
      'xhwang@chromium.org, crouleau@chromium.org',
      'Internals>Media>Encrypted'),
  'performance_browser_tests': BenchmarkMetadata(
      'miu@chromium.org', 'Internals>Media>ScreenCapture'),
  'media_perftests': BenchmarkMetadata(
      'crouleau@chromium.org, dalecurtis@chromium.org',
      'Internals>Media'),
  'views_perftests': BenchmarkMetadata(
      'tapted@chromium.org', 'Internals>Views'),
  'components_perftests': BenchmarkMetadata('csharrison@chromium.org')
}

RESOURCE_SIZES_METADATA = BenchmarkMetadata(
    'agrieve@chromium.org, jbudorick@chromium.org, perezju@chromium.org',
    'BUILD',
    ('https://chromium.googlesource.com/chromium/src/+/HEAD/'
     'tools/binary_size/README.md#resource_sizes_py'))

OTHER_BENCHMARKS = {
  'resource_sizes_chrome_public_apk': RESOURCE_SIZES_METADATA,
  'resource_sizes_chrome_modern_public_minimal_apks': RESOURCE_SIZES_METADATA,
  'resource_sizes_monochrome_public_minimal_apks': RESOURCE_SIZES_METADATA,
  'resource_sizes_system_webview_apk': RESOURCE_SIZES_METADATA,
}

# If you change this dictionary, run tools/perf/generate_perf_data
NON_WATERFALL_BENCHMARKS = {
  'sizes (mac)':
      BenchmarkMetadata('tapted@chromium.org'),
  'sizes (win)': BenchmarkMetadata('grt@chromium.org',
                                   'Internals>PlatformIntegration'),
  'sizes (linux)': BenchmarkMetadata(
      'thestig@chromium.org, thomasanderson@chromium.org',
      'Internals>PlatformIntegration'),
  'supersize_archive': BenchmarkMetadata('agrieve@chromium.org'),
}

def _get_telemetry_perf_benchmarks_metadata():
  metadata = {}
  benchmark_list = benchmark_finders.GetOfficialBenchmarks()
  for benchmark in benchmark_list:
    emails = decorators.GetEmails(benchmark)
    if emails:
      emails = ', '.join(emails)
    tags_set = benchmark_utils.GetStoryTags(benchmark())
    metadata[benchmark.Name()] = BenchmarkMetadata(
        emails, decorators.GetComponent(benchmark),
        decorators.GetDocumentationLink(benchmark),
        ','.join(tags_set))
  return metadata

TELEMETRY_PERF_BENCHMARKS = _get_telemetry_perf_benchmarks_metadata()

def get_scheduled_non_telemetry_benchmarks(perf_waterfall_file):
  test_names = set()
  with open(perf_waterfall_file) as f:
    tests_by_builder = json.load(f)
  script_tests = []
  for tests in tests_by_builder.values():
    if 'isolated_scripts' in tests:
      script_tests += tests['isolated_scripts']
    if 'scripts' in tests:
      script_tests += tests['scripts']
  for s in script_tests:
    name = s['name']
    # TODO(eyaich): Determine new way to generate ownership based
    # on the benchmark bot map instead of on the generated tests
    # for new perf recipe.
    if not name in ('performance_test_suite',
                    'performance_webview_test_suite'):
      test_names.add(name)
  return test_names

def is_perf_benchmarks_scheduling_valid(
    perf_waterfall_file, outstream):
  """Validates that all existing benchmarks are properly scheduled.
  Return: True if all benchmarks are properly scheduled, False otherwise.
  """
  scheduled_non_telemetry_tests = get_scheduled_non_telemetry_benchmarks(
      perf_waterfall_file)
  all_perf_gtests = set(GTEST_BENCHMARKS)
  all_perf_other_tests = set(OTHER_BENCHMARKS)
  error_messages = []
  for test_name in all_perf_gtests - scheduled_non_telemetry_tests:
    error_messages.append(
        'Benchmark %s is tracked but not scheduled on any perf waterfall '
        'builders. Either schedule or remove it from GTEST_BENCHMARKS.'
        % test_name)
  for test_name in all_perf_other_tests - scheduled_non_telemetry_tests:
    error_messages.append(
        'Benchmark %s is tracked but not scheduled on any perf waterfall '
        'builders. Either schedule or remove it from OTHER_BENCHMARKS.'
        % test_name)
  for test_name in scheduled_non_telemetry_tests.difference(
      all_perf_gtests, all_perf_other_tests):
    error_messages.append(
        'Benchmark %s is scheduled on perf waterfall but not tracked. Please '
        'add an entry for it in GTEST_BENCHMARKS or OTHER_BENCHMARKS in'
        '//tools/perf/core/perf_data_generator.py.' % test_name)
  for message in error_messages:
    print >> outstream, '*', textwrap.fill(message, 70), '\n'
  return not error_messages

# Verify that all benchmarks have owners except those on the whitelist.
def _verify_benchmark_owners(benchmark_metadatas):
  unowned_benchmarks = set()
  for benchmark_name in benchmark_metadatas:
    if benchmark_metadatas[benchmark_name].emails is None:
      unowned_benchmarks.add(benchmark_name)
  assert not unowned_benchmarks, (
      'All benchmarks must have owners. Please add owners for the following '
      'benchmarks:\n%s' % '\n'.join(unowned_benchmarks))

def update_benchmark_csv(file_path):
  """Updates go/chrome-benchmarks.
  Updates telemetry/perf/benchmark.csv containing the current benchmark names,
  owners, and components. Requires that all benchmarks have owners.
  """
  header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
      ['See https://bit.ly/update-benchmarks-info to make changes'],
      ['Benchmark name', 'Individual owners', 'Component', 'Documentation',
       'Tags']
  ]
  csv_data = []
  benchmark_metadatas = merge_dicts(
      GTEST_BENCHMARKS, OTHER_BENCHMARKS, TELEMETRY_PERF_BENCHMARKS,
      NON_WATERFALL_BENCHMARKS)
  _verify_benchmark_owners(benchmark_metadatas)
  undocumented_benchmarks = set()
  for benchmark_name in benchmark_metadatas:
    if not benchmark_metadatas[benchmark_name].documentation_url:
      undocumented_benchmarks.add(benchmark_name)
    csv_data.append([
        benchmark_name,
        benchmark_metadatas[benchmark_name].emails,
        benchmark_metadatas[benchmark_name].component,
        benchmark_metadatas[benchmark_name].documentation_url,
        benchmark_metadatas[benchmark_name].tags,
    ])
  if undocumented_benchmarks != ub_module.UNDOCUMENTED_BENCHMARKS:
    error_message = (
        'The list of known undocumented benchmarks does not reflect the actual '
        'ones.\n')
    if undocumented_benchmarks - ub_module.UNDOCUMENTED_BENCHMARKS:
      error_message += (
          'New undocumented benchmarks found. Please document them before '
          'enabling on perf waterfall: %s' % (
              ','.join(b for b in undocumented_benchmarks -
                       ub_module.UNDOCUMENTED_BENCHMARKS)))
    if ub_module.UNDOCUMENTED_BENCHMARKS - undocumented_benchmarks:
      error_message += (
          'These benchmarks are already documented. Please remove them from '
          'the UNDOCUMENTED_BENCHMARKS list in undocumented_benchmarks.py: %s' %
          (','.join(b for b in ub_module.UNDOCUMENTED_BENCHMARKS -
                    undocumented_benchmarks)))
    raise ValueError(error_message)
  csv_data = sorted(csv_data, key=lambda b: b[0])
  csv_data = header_data + csv_data
  with open(file_path, 'wb') as f:
    writer = csv.writer(f, lineterminator="\n")
    writer.writerows(csv_data)

def update_labs_docs_md(filepath):
  configs = collections.defaultdict(list)
  for tester in bot_platforms.ALL_PLATFORMS:
    if not tester.is_fyi:
      configs[tester.platform].append(tester)
  with open(filepath, 'w') as f:
    f.write("""
[comment]: # (AUTOGENERATED FILE DO NOT EDIT)
[comment]: # (See //tools/perf/generate_perf_data to make changes)

# Platforms tested in the Performance Lab
""")
    for platform, testers in sorted(configs.iteritems()):
      f.write('## %s\n\n' % platform.title())
      testers.sort()
      for tester in testers:
        f.write(' * [{0.name}]({0.buildbot_url}): {0.description}.\n'.format(
            tester))
      f.write('\n')

def validate_waterfall(builders_dict, waterfall_file):
  waterfall_tempfile = tempfile.NamedTemporaryFile(delete=False).name
  try:
    update_all_tests(builders_dict, waterfall_tempfile)
    return filecmp.cmp(waterfall_file, waterfall_tempfile)
  finally:
    os.remove(waterfall_tempfile)

def validate_benchmark_csv(benchmark_file):
  benchmark_tempfile = tempfile.NamedTemporaryFile(delete=False).name
  try:
    update_benchmark_csv(benchmark_tempfile)
    return filecmp.cmp(benchmark_file, benchmark_tempfile)
  finally:
    os.remove(benchmark_tempfile)

def validate_docs(labs_docs_file):
  labs_docs_tempfile = tempfile.NamedTemporaryFile(delete=False).name
  try:
    update_labs_docs_md(labs_docs_tempfile)
    return filecmp.cmp(labs_docs_file, labs_docs_tempfile)
  finally:
    os.remove(labs_docs_tempfile)

def generate_telemetry_args(tester_config):
  # First determine the browser that you need based on the tester
  browser_name = ''
  # For trybot testing we always use the reference build
  if tester_config.get('testing', False):
    browser_name = 'reference'
  elif 'browser' in tester_config:
    browser_name = 'exact'
  elif tester_config['platform'] == 'android':
    browser_name = 'android-chromium'
  elif tester_config['platform'].startswith('android-'):
    browser_name = tester_config['platform']
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'
  test_args = [
      '-v',
      '--browser=%s' % browser_name,
      '--upload-results'
  ]
  if 'browser' in tester_config:
    test_args.append('--browser-executable=../../out/Release/%s' %
                     tester_config['browser'])
  if tester_config['platform'].startswith('android'):
    test_args.append('--device=android')
  if tester_config['platform'].startswith('android-webview'):
    test_args.append(
        '--webview-embedder-apk=../../out/Release/apks/SystemWebViewShell.apk')
  return test_args

def generate_gtest_args(test_name):
  # --gtest-benchmark-name so the benchmark name is consistent with the test
  # step's name. This is not always the same as the test binary's name (see
  # crbug.com/870692).
  return [
      '--gtest-benchmark-name', test_name,
  ]

def generate_performance_test(tester_config, test):
  isolate_name = test['isolate']
  test_name = test.get('name', isolate_name)
  test_type = test.get('type', TEST_TYPES.TELEMETRY)
  assert test_type in TEST_TYPES.ALL
  test_args = []
  if test_type == TEST_TYPES.TELEMETRY:
    test_args += generate_telemetry_args(tester_config)
  elif test_type == TEST_TYPES.GTEST:
    test_args += generate_gtest_args(test_name=test_name)
  # Append any additional args specific to an isolate
  test_args += test.get('extra_args', [])
  result = {
      'args': test_args,
      'isolate_name': isolate_name,
      'name': test_name,
      'override_compile_targets': [
          isolate_name
      ]
  }
  # For now we either get shards from the number of devices specified
  # or a test entry needs to specify the num shards if it supports
  # soft device affinity.
  if tester_config.get('perf_trigger', True):
    result['trigger_script'] = {
        'requires_simultaneous_shard_dispatch': True,
        'script': '//testing/trigger_scripts/perf_device_trigger.py',
        'args': [
            '--multiple-dimension-script-verbose',
            'True'
        ],
    }
  result['merge'] = {
      'script': '//tools/perf/process_perf_results.py',
  }
  shards = test.get('num_shards')
  result['swarming'] = {
      # Always say this is true regardless of whether the tester
      # supports swarming. It doesn't hurt.
      'can_use_on_swarming_builders': True,
      'expiration': 2 * 60 * 60,  # 2 hours pending max
      # TODO(crbug.com/865538): once we have plenty of windows hardwares,
      # to shards perf benchmarks on Win builders, reduce this hard timeout
      # limit to ~2 hrs.
      'hard_timeout': 10 * 60 * 60,  # 10 hours timeout for full suite
      'ignore_task_failure': False,
      'io_timeout': 30 * 60,  # 30 minutes
      'dimension_sets': [
          tester_config['dimension']
      ],
  }
  if shards:
    result['swarming']['shards'] = shards
  return result

def generate_builder_config(condensed_config):
  config = {}
  if 'additional_compile_targets' in condensed_config:
    config['additional_compile_targets'] = (
        condensed_config['additional_compile_targets'])
  condensed_tests = condensed_config.get('tests')
  if condensed_tests:
    gtest_tests = []
    telemetry_tests = []
    other_tests = []
    for test in condensed_tests:
      generated_script = generate_performance_test(condensed_config, test)
      test_type = test.get('type', TEST_TYPES.TELEMETRY)
      if test_type == TEST_TYPES.GTEST:
        gtest_tests.append(generated_script)
      elif test_type == TEST_TYPES.TELEMETRY:
        telemetry_tests.append(generated_script)
      elif test_type == TEST_TYPES.GENERIC:
        other_tests.append(generated_script)
      else:
        raise ValueError(
            'perf_data_generator.py does not understand test type %s.'
            % test_type)
    gtest_tests.sort(key=lambda x: x['name'])
    telemetry_tests.sort(key=lambda x: x['name'])
    other_tests.sort(key=lambda x: x['name'])
    # Put Telemetry tests as the end since they tend to run longer to avoid
    # starving gtests (see crbug.com/873389).
    config['isolated_scripts'] = gtest_tests + telemetry_tests + other_tests
  return config

def main(args):
  parser = argparse.ArgumentParser(
      description=('Generate perf test\' json config and benchmark.csv. '
                   'This needs to be done anytime you add/remove any existing'
                   'benchmarks in tools/perf/benchmarks.'))
  parser.add_argument(
      '--validate-only', action='store_true', default=False,
      help=('Validate whether the perf json generated will be the same as the '
            'existing configs. This does not change the contain of existing '
            'configs'))
  options = parser.parse_args(args)
  perf_waterfall_file = os.path.join(
      path_util.GetChromiumSrcDir(), 'testing', 'buildbot',
      'chromium.perf.json')
  fyi_waterfall_file = os.path.join(
      path_util.GetChromiumSrcDir(), 'testing', 'buildbot',
      'chromium.perf.fyi.json')
  benchmark_file = os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmark.csv')
  labs_docs_file = os.path.join(
      path_util.GetChromiumSrcDir(), 'docs', 'speed', 'perf_lab_platforms.md')
  return_code = 0
  if options.validate_only:
    if (validate_waterfall(BUILDERS, perf_waterfall_file)
        and validate_waterfall(FYI_BUILDERS, fyi_waterfall_file)
        and validate_benchmark_csv(benchmark_file)
        and validate_docs(labs_docs_file)
        and is_perf_benchmarks_scheduling_valid(
            perf_waterfall_file, outstream=sys.stderr)):
      print 'All the perf config files are up-to-date. \\o/'
      return 0
    else:
      print ('Not all perf config files are up-to-date. Please run %s '
             'to update them.') % sys.argv[0]
      return 1
  else:
    update_all_tests(FYI_BUILDERS, fyi_waterfall_file)
    update_all_tests(BUILDERS, perf_waterfall_file)
    update_benchmark_csv(benchmark_file)
    update_labs_docs_md(labs_docs_file)
    if not is_perf_benchmarks_scheduling_valid(
        perf_waterfall_file, outstream=sys.stderr):
      return_code = 1
  ...
```


debugger_utils_test.py

Source: debugger_utils_test.py (GitHub)


```python
#!/usr/bin/env python
"""Tests for the core module."""
import unittest

import lldb
from lldb_script_utils import debugger_utils

TEST_PACKAGE = f'{__package__}.debugger_utils_test'
TEST_CLASS = f'{TEST_PACKAGE}._TestClass'
TEST_FUNCTION = f'{TEST_PACKAGE}._test_function'
TEST_SUMMARY_FUNCTION = f'{TEST_PACKAGE}._type_summary_function'
TEST_COMMAND = 'testCommand'
TEST_HELP = 'Help for testCommand'
TEST_TYPES = ['int *', 'bool']
TEST_TYPE_NAMES = "'int *' bool"


class _TestClass:
    class Inner:
        pass


def _test_function():
    pass


def _type_summary_function(unused_: lldb.SBValue, _: dict) -> str:
    return ''


class _TestDebugger(lldb.SBDebugger):
    def __init__(self, *args):
        super().__init__(*args)
        self.handled_command = ''

    def HandleCommand(self, command):
        self.handled_command = command


class CoreTest(unittest.TestCase):
    def test_format_fully_qualified_type_name(self):
        self.assertEqual(
            f'{TEST_CLASS}',
            debugger_utils.format_fully_qualified_type_name(_TestClass))
        self.assertEqual(
            f'{TEST_CLASS}.Inner',
            debugger_utils.format_fully_qualified_type_name(_TestClass.Inner))
        self.assertEqual(
            'int', debugger_utils.format_fully_qualified_type_name(int))

    def test_format_command_script_add(self):
        debugger = _TestDebugger()
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _test_function)
        self.assertEqual(
            f'command script add --function {TEST_FUNCTION} {TEST_COMMAND}',
            debugger.handled_command)
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _test_function,
                                                 help=TEST_HELP)
        self.assertEqual(
            f'command script add --function {TEST_FUNCTION} ' +
            f"--help '{TEST_HELP}' {TEST_COMMAND}", debugger.handled_command)
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _test_function,
                                                 help=TEST_HELP,
                                                 synchronicity='synchronous')
        self.assertEqual(
            f'command script add --function {TEST_FUNCTION} ' +
            f"--help '{TEST_HELP}' --synchronicity synchronous {TEST_COMMAND}",
            debugger.handled_command)
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _test_function,
                                                 synchronicity='asynchronous')
        self.assertEqual(
            f'command script add --function {TEST_FUNCTION} ' +
            f'--synchronicity asynchronous {TEST_COMMAND}',
            debugger.handled_command)
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _TestClass)
        self.assertEqual(
            f'command script add --class {TEST_CLASS} {TEST_COMMAND}',
            debugger.handled_command)
        debugger_utils.handle_command_script_add(debugger, TEST_COMMAND,
                                                 _TestClass.Inner,
                                                 synchronicity='current')
        self.assertEqual(
            f'command script add --class {TEST_CLASS}.Inner ' +
            f'--synchronicity current {TEST_COMMAND}',
            debugger.handled_command)

    def test_format_type_summary_add(self):
        debugger = _TestDebugger()
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               inline_children=True)
        self.assertEqual(
            'type summary add --inline-children ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               omit_names=True)
        self.assertEqual('type summary add --omit-names ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               expand=True)
        self.assertEqual('type summary add --expand ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               hide_empty=True)
        self.assertEqual('type summary add --hide-empty ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               skip_pointers=True)
        self.assertEqual('type summary add --skip-pointers ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               skip_references=True)
        self.assertEqual(
            'type summary add --skip-references ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               no_value=True)
        self.assertEqual('type summary add --no-value ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               regex=True)
        self.assertEqual('type summary add --regex ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               summary_string='short_summary')
        self.assertEqual(
            'type summary add --summary-string short_summary ' +
            TEST_TYPE_NAMES, debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               summary_string='long summary')
        self.assertEqual(
            "type summary add --summary-string 'long summary' " +
            TEST_TYPE_NAMES, debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               cascade=True)
        self.assertEqual('type summary add --cascade true ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               cascade=False)
        self.assertEqual('type summary add --cascade false ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_summary_add(
            debugger, *TEST_TYPES, python_function=_type_summary_function)
        self.assertEqual(
            f'type summary add --python-function {TEST_SUMMARY_FUNCTION} ' +
            TEST_TYPE_NAMES, debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               python_script='short_script')
        self.assertEqual(
            'type summary add --python-script short_script ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               python_script='long script')
        self.assertEqual(
            "type summary add --python-script 'long script' " +
            TEST_TYPE_NAMES, debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               category='short_category')
        self.assertEqual(
            'type summary add --category short_category ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               category='long category')
        self.assertEqual(
            "type summary add --category 'long category' " + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               name='short_name')
        self.assertEqual(
            'type summary add --name short_name ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_summary_add(debugger, *TEST_TYPES,
                                               name='long name')
        self.assertEqual(
            "type summary add --name 'long name' " + TEST_TYPE_NAMES,
            debugger.handled_command)

    def test_format_type_synthetic_add(self):
        debugger = _TestDebugger()
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 skip_pointers=True)
        self.assertEqual(
            'type synthetic add --skip-pointers ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 skip_references=True)
        self.assertEqual(
            'type synthetic add --skip-references ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 regex=True)
        self.assertEqual('type synthetic add --regex ' + TEST_TYPE_NAMES,
                         debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 cascade=True)
        self.assertEqual(
            'type synthetic add --cascade true ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 cascade=False)
        self.assertEqual(
            'type synthetic add --cascade false ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 category='short_category')
        self.assertEqual(
            'type synthetic add --category short_category ' + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 category='long category')
        self.assertEqual(
            "type synthetic add --category 'long category' " + TEST_TYPE_NAMES,
            debugger.handled_command)
        debugger_utils.handle_type_synthetic_add(debugger, *TEST_TYPES,
                                                 python_class=_TestClass)
        self.assertEqual(
            f'type synthetic add --python-class {TEST_CLASS} ' +
            TEST_TYPE_NAMES, debugger.handled_command)


if __name__ == '__main__':
    ...
```
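Note how every call splats the TEST_TYPES list (['int *', 'bool']) into the handler, and the assertions compare against TEST_TYPE_NAMES ("'int *' bool"): a type name containing spaces is single-quoted, a bare identifier is left as-is. A self-contained sketch of that quoting convention, using shlex.quote as a stand-in for whatever lldb_script_utils does internally:

```python
import shlex

TEST_TYPES = ['int *', 'bool']

def format_type_names(type_names):
    # Quote names that need it ('int *' -> "'int *'") and leave bare
    # identifiers (bool) untouched; shlex.quote happens to match the
    # output the tests above expect.
    return ' '.join(shlex.quote(name) for name in type_names)

assert format_type_names(TEST_TYPES) == "'int *' bool"
```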


pt_dataloader.py

Source: pt_dataloader.py (GitHub)


```python
import os.path
from typing import List, Any, Callable, Optional

from torch.utils.data import Dataset

from nlptest.types import BehaviorType
from nlptest.behavior import Behavior
from .testpack import TestPack


class PyTorchTestPack(Dataset):
    def __init__(self, capabilities: List[str], names: List[str],
                 test_types: List[BehaviorType], texts: List[str],
                 labels: List[Any], processor: Optional[Callable] = None):
        """
        :param capabilities:
        :param names:
        :param test_types:
        :param texts:
        :param labels:
        :param processor:
        :return:
        """
        assert len(capabilities) == len(names) == len(test_types) \
            == len(texts) == len(labels)
        self.capabilities = capabilities
        self.names = names
        self.test_types = test_types
        self.texts = texts
        self.labels = labels
        self.processor = processor

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        if self.processor is not None:
            return self.processor(**{
                "capability": self.capabilities[idx],
                "name": self.names[idx],
                "test_type": self.test_types[idx],
                "text": self.texts[idx],
                "labels": self.labels[idx]
            })
        return {
            "capability": self.capabilities[idx],
            "name": self.names[idx],
            "test_type": self.test_types[idx],
            "text": self.texts[idx],
            "labels": self.labels[idx]
        }

    @classmethod
    def from_testpack(cls, testpack: TestPack, processor: Callable = None):
        """Constructs a PyTorch Dataset from a TestPack."""
        capabilities, names, test_types, texts, all_labels = [], [], [], [], []
        behaviors = list(testpack.behaviors)
        for behavior in behaviors:
            for sample, labels in zip(behavior.samples, behavior.labels):
                capabilities.append(behavior.capability)
                names.append(behavior.name)
                test_types.append(behavior.test_type.value)
                texts.append(sample)
                all_labels.append(labels)
        return cls(capabilities, names, test_types, texts, all_labels,
                   processor)

    @classmethod
    def from_saved_behaviors(cls, folder_path: str, processor: Callable = None):
        """Constructs a PyTorch Dataset from a folder of pickled behaviors."""
        assert os.path.isdir(folder_path), "Please provide a path to a folder."
        files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                 if f.endswith(".pkl")]
        capabilities, names, test_types, texts, all_labels = [], [], [], [], []
        for f in files:
            behavior = Behavior.from_file(f)
            for sample, labels in zip(behavior.samples, behavior.labels):
                capabilities.append(behavior.capability)
                names.append(behavior.name)
                test_types.append(behavior.test_type.value)
                texts.append(sample)
                all_labels.append(labels)
        ...
```
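In this dataset, test_types is one of five parallel columns, and __getitem__ reassembles them into a per-sample dict, so each DataLoader batch carries the behavior metadata alongside the text and labels. A minimal usage sketch with made-up data (assumes torch and the nlptest package are installed; plain strings stand in for BehaviorType values, matching the .value strings that from_testpack stores):

```python
from torch.utils.data import DataLoader

# Hypothetical rows: two NER invariance checks over the same sentence.
dataset = PyTorchTestPack(
    capabilities=['NER', 'NER'],
    names=['lowercase names', 'uppercase names'],
    test_types=['invariance', 'invariance'],
    texts=['john lives in paris', 'JOHN LIVES IN PARIS'],
    labels=[[1, 0, 0, 1], [1, 0, 0, 1]],
)

# The default collate function batches each dict key separately.
for batch in DataLoader(dataset, batch_size=2):
    print(batch['test_type'], batch['text'])
```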


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, covering everything from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud
