# chrome_tests.py (Chromium tools/valgrind) -- runs various chrome tests
# through valgrind_test.py.
1#!/usr/bin/env python2# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.3#4# Use of this source code is governed by a BSD-style license5# that can be found in the LICENSE file in the root of the source6# tree. An additional intellectual property rights grant can be found7# in the file PATENTS.  All contributing project authors may8# be found in the AUTHORS file in the root of the source tree.9''' Runs various chrome tests through valgrind_test.py.'''10import glob11import logging12import multiprocessing13import optparse14import os15import stat16import subprocess17import sys18import logging_utils19import path_utils20import common21import valgrind_test22class TestNotFound(Exception): pass23class MultipleGTestFiltersSpecified(Exception): pass24class BuildDirNotFound(Exception): pass25class BuildDirAmbiguous(Exception): pass26class ExecutableNotFound(Exception): pass27class BadBinary(Exception): pass28class ChromeTests:29  SLOW_TOOLS = ["memcheck", "drmemory"]30  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 30031  def __init__(self, options, args, test):32    if ':' in test:33      (self._test, self._gtest_filter) = test.split(':', 1)34    else:35      self._test = test36      self._gtest_filter = options.gtest_filter37    if self._test not in self._test_list:38      raise TestNotFound("Unknown test: %s" % test)39    if options.gtest_filter and options.gtest_filter != self._gtest_filter:40      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "41                                          "and --test %s" % test)42    self._options = options43    self._args = args44    script_dir = path_utils.ScriptDir()45    # Compute the top of the tree (the "source dir") from the script dir (where46    # this script lives).  
We assume that the script dir is in tools/valgrind/47    # relative to the top of the tree.48    self._source_dir = os.path.dirname(os.path.dirname(script_dir))49    # since this path is used for string matching, make sure it's always50    # an absolute Unix-style path51    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')52    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")53    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]54    if not self._options.build_dir:55      dirs = [56        os.path.join(self._source_dir, "xcodebuild", "Debug"),57        os.path.join(self._source_dir, "out", "Debug"),58        os.path.join(self._source_dir, "build", "Debug"),59      ]60      build_dir = [d for d in dirs if os.path.isdir(d)]61      if len(build_dir) > 1:62        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"63                                "%s\nPlease specify just one "64                                "using --build-dir" % ", ".join(build_dir))65      elif build_dir:66        self._options.build_dir = build_dir[0]67      else:68        self._options.build_dir = None69    if self._options.build_dir:70      build_dir = os.path.abspath(self._options.build_dir)71      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]72  def _EnsureBuildDirFound(self):73    if not self._options.build_dir:74      raise BuildDirNotFound("Oops, couldn't find a build dir, please "75                             "specify it manually using --build-dir")76  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):77    '''Generates the default command array that most tests will use.'''78    if exe and common.IsWindows():79      exe += '.exe'80    cmd = list(self._command_preamble)81    # Find all suppressions matching the following pattern:82    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt83    # and list them with --suppressions= prefix.84    script_dir = 
path_utils.ScriptDir()85    tool_name = tool.ToolName();86    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")87    if os.path.exists(suppression_file):88      cmd.append("--suppressions=%s" % suppression_file)89    # Platform-specific suppression90    for platform in common.PlatformNames():91      platform_suppression_file = \92          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)93      if os.path.exists(platform_suppression_file):94        cmd.append("--suppressions=%s" % platform_suppression_file)95    if tool_name == "drmemory":96      if self._options.drmemory_ops:97        # prepending " " to avoid Dr. Memory's option confusing optparse98        cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]99    if self._options.valgrind_tool_flags:100      cmd += self._options.valgrind_tool_flags.split(" ")101    if self._options.keep_logs:102      cmd += ["--keep_logs"]103    if valgrind_test_args != None:104      for arg in valgrind_test_args:105        cmd.append(arg)106    if exe:107      self._EnsureBuildDirFound()108      exe_path = os.path.join(self._options.build_dir, exe)109      if not os.path.exists(exe_path):110        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)111      # Make sure we don't try to test ASan-built binaries112      # with other dynamic instrumentation-based tools.113      # TODO(timurrrr): also check TSan and MSan?114      # `nm` might not be available, so use try-except.115      try:116        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle117        # binaries built with Clang 3.5+.118        if not common.IsMac():119          nm_output = subprocess.check_output(["nm", exe_path])120          if nm_output.find("__asan_init") != -1:121            raise BadBinary("You're trying to run an executable instrumented "122                            "with AddressSanitizer under %s. Please provide "123                            "an uninstrumented executable." 
% tool_name)124      except OSError:125        pass126      cmd.append(exe_path)127      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time128      # so we can find the slowpokes.129      cmd.append("--gtest_print_time")130      # Built-in test launcher for gtest-based executables runs tests using131      # multiple process by default. Force the single-process mode back.132      cmd.append("--single-process-tests")133    if self._options.gtest_repeat:134      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)135    if self._options.gtest_shuffle:136      cmd.append("--gtest_shuffle")137    if self._options.gtest_break_on_failure:138      cmd.append("--gtest_break_on_failure")139    if self._options.test_launcher_bot_mode:140      cmd.append("--test-launcher-bot-mode")141    if self._options.test_launcher_total_shards is not None:142      cmd.append("--test-launcher-total-shards=%d"143                 % self._options.test_launcher_total_shards)144    if self._options.test_launcher_shard_index is not None:145      cmd.append("--test-launcher-shard-index=%d"146                 % self._options.test_launcher_shard_index)147    return cmd148  def Run(self):149    ''' Runs the test specified by command-line argument --test '''150    logging.info("running test %s" % (self._test))151    return self._test_list[self._test](self)152  def _AppendGtestFilter(self, tool, name, cmd):153    '''Append an appropriate --gtest_filter flag to the googletest binary154       invocation.155       If the user passed their own filter mentioning only one test, just use156       it. Otherwise, filter out tests listed in the appropriate gtest_exclude157       files.158    '''159    if (self._gtest_filter and160        ":" not in self._gtest_filter and161        "?" 
not in self._gtest_filter and162        "*" not in self._gtest_filter):163      cmd.append("--gtest_filter=%s" % self._gtest_filter)164      return165    filters = []166    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")167    gtest_filter_files = [168        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]169    # Use ".gtest.txt" files only for slow tools, as they now contain170    # Valgrind- and Dr.Memory-specific filters.171    # TODO(glider): rename the files to ".gtest_slow.txt"172    if tool.ToolName() in ChromeTests.SLOW_TOOLS:173      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]174    for platform_suffix in common.PlatformNames():175      gtest_filter_files += [176        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),177        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \178            (tool.ToolName(), platform_suffix))]179    logging.info("Reading gtest exclude filter files:")180    for filename in gtest_filter_files:181      # strip the leading absolute path (may be very long on the bot)182      # and the following / or \.183      readable_filename = filename.replace("\\", "/")  # '\' on Windows184      readable_filename = readable_filename.replace(self._source_dir, "")[1:]185      if not os.path.exists(filename):186        logging.info("  \"%s\" - not found" % readable_filename)187        continue188      logging.info("  \"%s\" - OK" % readable_filename)189      f = open(filename, 'r')190      for line in f.readlines():191        if line.startswith("#") or line.startswith("//") or line.isspace():192          continue193        line = line.rstrip()194        test_prefixes = ["FLAKY", "FAILS"]195        for p in test_prefixes:196          # Strip prefixes from the test names.197          line = line.replace(".%s_" % p, ".")198        # Exclude the original test name.199        filters.append(line)200        if line[-2:] != ".*":201      
    # List all possible prefixes if line doesn't end with ".*".202          for p in test_prefixes:203            filters.append(line.replace(".", ".%s_" % p))204    # Get rid of duplicates.205    filters = set(filters)206    gtest_filter = self._gtest_filter207    if len(filters):208      if gtest_filter:209        gtest_filter += ":"210        if gtest_filter.find("-") < 0:211          gtest_filter += "-"212      else:213        gtest_filter = "-"214      gtest_filter += ":".join(filters)215    if gtest_filter:216      cmd.append("--gtest_filter=%s" % gtest_filter)217  @staticmethod218  def ShowTests():219    test_to_names = {}220    for name, test_function in ChromeTests._test_list.iteritems():221      test_to_names.setdefault(test_function, []).append(name)222    name_to_aliases = {}223    for names in test_to_names.itervalues():224      names.sort(key=lambda name: len(name))225      name_to_aliases[names[0]] = names[1:]226    print227    print "Available tests:"228    print "----------------"229    for name, aliases in sorted(name_to_aliases.iteritems()):230      if aliases:231        print "   {} (aka {})".format(name, ', '.join(aliases))232      else:233        print "   {}".format(name)234  def SetupLdPath(self, requires_build_dir):235    if requires_build_dir:236      self._EnsureBuildDirFound()237    elif not self._options.build_dir:238      return239    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.240    if (os.getenv("LD_LIBRARY_PATH")):241      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),242                                              self._options.build_dir))243    else:244      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)245  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):246    tool = valgrind_test.CreateTool(self._options.valgrind_tool)247    cmd = self._DefaultCommand(tool, name, valgrind_test_args)248    self._AppendGtestFilter(tool, name, cmd)249    
cmd.extend(['--test-tiny-timeout=1000'])250    if cmd_args:251      cmd.extend(cmd_args)252    self.SetupLdPath(True)253    return tool.Run(cmd, module)254  def RunCmdLine(self):255    tool = valgrind_test.CreateTool(self._options.valgrind_tool)256    cmd = self._DefaultCommand(tool, None, self._args)257    self.SetupLdPath(False)258    return tool.Run(cmd, None)259  def TestAccessibility(self):260    return self.SimpleTest("accessibility", "accessibility_unittests")261  def TestAddressInput(self):262    return self.SimpleTest("addressinput", "libaddressinput_unittests")263  def TestAngle(self):264    return self.SimpleTest("angle", "angle_unittests")265  def TestAppList(self):266    return self.SimpleTest("app_list", "app_list_unittests")267  def TestAsh(self):268    return self.SimpleTest("ash", "ash_unittests")269  def TestAura(self):270    return self.SimpleTest("aura", "aura_unittests")271  def TestBase(self):272    return self.SimpleTest("base", "base_unittests")273  def TestBlinkHeap(self):274    return self.SimpleTest("blink_heap", "blink_heap_unittests")275  def TestBlinkPlatform(self):276    return self.SimpleTest("blink_platform", "blink_platform_unittests")277  def TestCacheInvalidation(self):278    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")279  def TestCast(self):280    return self.SimpleTest("chrome", "cast_unittests")281  def TestCC(self):282    return self.SimpleTest("cc", "cc_unittests",283                           cmd_args=[284                               "--cc-layer-tree-test-long-timeout"])285  def TestChromeApp(self):286    return self.SimpleTest("chrome_app", "chrome_app_unittests")287  def TestChromeElf(self):288    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")289  def TestChromeDriver(self):290    return self.SimpleTest("chromedriver", "chromedriver_unittests")291  def TestChromeOS(self):292    return self.SimpleTest("chromeos", "chromeos_unittests")293  def TestComponents(self):294    return 
self.SimpleTest("components", "components_unittests")295  def TestCompositor(self):296    return self.SimpleTest("compositor", "compositor_unittests")297  def TestContent(self):298    return self.SimpleTest("content", "content_unittests")299  def TestCourgette(self):300    return self.SimpleTest("courgette", "courgette_unittests")301  def TestCrypto(self):302    return self.SimpleTest("crypto", "crypto_unittests")303  def TestDevice(self):304    return self.SimpleTest("device", "device_unittests")305  def TestDisplay(self):306    return self.SimpleTest("display", "display_unittests")307  def TestEvents(self):308    return self.SimpleTest("events", "events_unittests")309  def TestExtensions(self):310    return self.SimpleTest("extensions", "extensions_unittests")311  def TestFFmpegRegressions(self):312    return self.SimpleTest("chrome", "ffmpeg_regression_tests")313  def TestGCM(self):314    return self.SimpleTest("gcm", "gcm_unit_tests")315  def TestGfx(self):316    return self.SimpleTest("gfx", "gfx_unittests")317  def TestGin(self):318    return self.SimpleTest("gin", "gin_unittests")319  def TestGoogleApis(self):320    return self.SimpleTest("google_apis", "google_apis_unittests")321  def TestGPU(self):322    return self.SimpleTest("gpu", "gpu_unittests")323  def TestIpc(self):324    return self.SimpleTest("ipc", "ipc_tests",325                           valgrind_test_args=["--trace_children"])326  def TestInstallerUtil(self):327    return self.SimpleTest("installer_util", "installer_util_unittests")328  def TestInstallStatic(self):329    return self.SimpleTest("install_static", "install_static_unittests")330  def TestJingle(self):331    return self.SimpleTest("chrome", "jingle_unittests")332  def TestKeyboard(self):333    return self.SimpleTest("keyboard", "keyboard_unittests")334  def TestLatency(self):335    return self.SimpleTest("latency", "latency_unittests")336  def TestMedia(self):337    return self.SimpleTest("chrome", "media_unittests")338  def 
TestMessageCenter(self):339    return self.SimpleTest("message_center", "message_center_unittests")340  def TestMidi(self):341    return self.SimpleTest("chrome", "midi_unittests")342  def TestMojoCommon(self):343    return self.SimpleTest("mojo_common", "mojo_common_unittests")344  def TestMojoPublicBindings(self):345    return self.SimpleTest("mojo_public_bindings",346                           "mojo_public_bindings_unittests")347  def TestMojoPublicSystem(self):348    return self.SimpleTest("mojo_public_system",349                           "mojo_public_system_unittests")350  def TestMojoPublicSysPerf(self):351    return self.SimpleTest("mojo_public_sysperf",352                           "mojo_public_system_perftests")353  def TestMojoSystem(self):354    return self.SimpleTest("mojo_system", "mojo_system_unittests")355  def TestNet(self):356    return self.SimpleTest("net", "net_unittests")357  def TestNetPerf(self):358    return self.SimpleTest("net", "net_perftests")359  def TestPhoneNumber(self):360    return self.SimpleTest("phonenumber", "libphonenumber_unittests")361  def TestPPAPI(self):362    return self.SimpleTest("chrome", "ppapi_unittests")363  def TestPrinting(self):364    return self.SimpleTest("chrome", "printing_unittests")365  def TestRemoting(self):366    return self.SimpleTest("chrome", "remoting_unittests",367                           cmd_args=[368                               "--ui-test-action-timeout=60000",369                               "--ui-test-action-max-timeout=150000"])370  def TestSkia(self):371    return self.SimpleTest("skia", "skia_unittests")372  def TestSql(self):373    return self.SimpleTest("chrome", "sql_unittests")374  def TestStorage(self):375    return self.SimpleTest("storage", "storage_unittests")376  def TestLinuxSandbox(self):377    return self.SimpleTest("sandbox", "sandbox_linux_unittests")378  def TestUnit(self):379    # http://crbug.com/51716380    # Disabling all unit tests381    # Problems reappeared after 
r119922382    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):383      logging.warning("unit_tests are disabled for memcheck on MacOS.")384      return 0;385    return self.SimpleTest("chrome", "unit_tests")386  def TestUIBaseUnit(self):387    return self.SimpleTest("chrome", "ui_base_unittests")388  def TestUIChromeOS(self):389    return self.SimpleTest("chrome", "ui_chromeos_unittests")390  def TestURL(self):391    return self.SimpleTest("chrome", "url_unittests")392  def TestViews(self):393    return self.SimpleTest("views", "views_unittests")394  # Valgrind timeouts are in seconds.395  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]396  # UI test timeouts are in milliseconds.397  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",398                  "--ui-test-action-max-timeout=150000",399                  "--no-sandbox"]400  # TODO(thestig) fine-tune these values.401  # Valgrind timeouts are in seconds.402  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]403  # Browser test timeouts are in milliseconds.404  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",405                       "--ui-test-action-max-timeout=800000",406                       "--no-sandbox"]407  def TestBrowser(self):408    return self.SimpleTest("chrome", "browser_tests",409                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,410                           cmd_args=self.BROWSER_TEST_ARGS)411  def TestContentBrowser(self):412    return self.SimpleTest("content", "content_browsertests",413                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,414                           cmd_args=self.BROWSER_TEST_ARGS)415  def TestInteractiveUI(self):416    return self.SimpleTest("chrome", "interactive_ui_tests",417                           valgrind_test_args=self.UI_VALGRIND_ARGS,418                           cmd_args=self.UI_TEST_ARGS)419  def TestSyncIntegration(self):420    return 
self.SimpleTest("chrome", "sync_integration_tests",421                           valgrind_test_args=self.UI_VALGRIND_ARGS,422                           cmd_args=(["--ui-test-action-max-timeout=450000"]))423  def TestLayoutChunk(self, chunk_num, chunk_size):424    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the425    # list of tests.  Wrap around to beginning of list at end.426    # If chunk_size is zero, run all tests in the list once.427    # If a text file is given as argument, it is used as the list of tests.428    assert((chunk_size == 0) != (len(self._args) == 0))429    # Build the ginormous commandline in 'cmd'.430    # It's going to be roughly431    #  python valgrind_test.py ...432    # but we'll use the --indirect flag to valgrind_test.py433    # to avoid valgrinding python.434    # Start by building the valgrind_test.py commandline.435    tool = valgrind_test.CreateTool(self._options.valgrind_tool)436    cmd = self._DefaultCommand(tool)437    cmd.append("--trace_children")438    cmd.append("--indirect_webkit_layout")439    cmd.append("--ignore_exit_code")440    # Now build script_cmd, the run-webkits-tests commandline.441    # Store each chunk in its own directory so that we can find the data later442    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)443    out_dir = os.path.join(path_utils.ScriptDir(), "latest")444    out_dir = os.path.join(out_dir, chunk_dir)445    if os.path.exists(out_dir):446      old_files = glob.glob(os.path.join(out_dir, "*.txt"))447      for f in old_files:448        os.remove(f)449    else:450      os.makedirs(out_dir)451    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",452                          "Scripts", "run-webkit-tests")453    # http://crbug.com/260627: After the switch to content_shell from DRT, each454    # test now brings up 3 processes.  
Under Valgrind, they become memory bound455    # and can eventually OOM if we don't reduce the total count.456    # It'd be nice if content_shell automatically throttled the startup of new457    # tests if we're low on memory.458    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))459    script_cmd = ["python", script, "-v",460                  # run a separate DumpRenderTree for each test461                  "--batch-size=1",462                  "--fully-parallel",463                  "--child-processes=%d" % jobs,464                  "--time-out-ms=800000",465                  "--no-retry-failures",  # retrying takes too much time466                  # http://crbug.com/176908: Don't launch a browser when done.467                  "--no-show-results",468                  "--nocheck-sys-deps",469                  "--additional-driver-flag=--no-sandbox"]470    # Pass build mode to run-webkit-tests.  We aren't passed it directly,471    # so parse it out of build_dir.  run-webkit-tests can only handle472    # the two values "Release" and "Debug".473    # TODO(Hercules): unify how all our scripts pass around build mode474    # (--mode / --target / --build-dir / --debug)475    if self._options.build_dir:476      build_root, mode = os.path.split(self._options.build_dir)477      script_cmd.extend(["--build-directory", build_root, "--target", mode])478    if (chunk_size > 0):479      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))480    if len(self._args):481      # if the arg is a txt file, then treat it as a list of tests482      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":483        script_cmd.append("--test-list=%s" % self._args[0])484      else:485        script_cmd.extend(self._args)486    self._AppendGtestFilter(tool, "layout", script_cmd)487    # Now run script_cmd with the wrapper in cmd488    cmd.extend(["--"])489    cmd.extend(script_cmd)490    # Layout tests often times fail quickly, but the buildbot remains green.491  
  # Detect this situation when running with the default chunk size.492    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:493      min_runtime_in_seconds=120494    else:495      min_runtime_in_seconds=0496    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)497    return ret498  def TestLayout(self):499    # A "chunk file" is maintained in the local directory so that each test500    # runs a slice of the layout tests of size chunk_size that increments with501    # each run.  Since tests can be added and removed from the layout tests at502    # any time, this is not going to give exact coverage, but it will allow us503    # to continuously run small slices of the layout tests under valgrind rather504    # than having to run all of them in one shot.505    chunk_size = self._options.num_tests506    if chunk_size == 0 or len(self._args):507      return self.TestLayoutChunk(0, 0)508    chunk_num = 0509    chunk_file = os.path.join("valgrind_layout_chunk.txt")510    logging.info("Reading state from " + chunk_file)511    try:512      f = open(chunk_file)513      if f:514        chunk_str = f.read()515        if len(chunk_str):516          chunk_num = int(chunk_str)517        # This should be enough so that we have a couple of complete runs518        # of test data stored in the archive (although note that when we loop519        # that we almost guaranteed won't be at the end of the test list)520        if chunk_num > 10000:521          chunk_num = 0522        f.close()523    except IOError, (errno, strerror):524      logging.error("error reading from file %s (%d, %s)" % (chunk_file,525                    errno, strerror))526    # Save the new chunk size before running the tests. 
Otherwise if a527    # particular chunk hangs the bot, the chunk number will never get528    # incremented and the bot will be wedged.529    logging.info("Saving state to " + chunk_file)530    try:531      f = open(chunk_file, "w")532      chunk_num += 1533      f.write("%d" % chunk_num)534      f.close()535    except IOError, (errno, strerror):536      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,537                    strerror))538    # Since we're running small chunks of the layout tests, it's important to539    # mark the ones that have errors in them.  These won't be visible in the540    # summary list for long, but will be useful for someone reviewing this bot.541    return self.TestLayoutChunk(chunk_num, chunk_size)542  # The known list of tests.543  # Recognise the original abbreviations as well as full executable names.544  _test_list = {545    "cmdline" : RunCmdLine,546    "addressinput": TestAddressInput,547    "libaddressinput_unittests": TestAddressInput,548    "accessibility": TestAccessibility,549    "angle": TestAngle,          "angle_unittests": TestAngle,550    "app_list": TestAppList,     "app_list_unittests": TestAppList,551    "ash": TestAsh,              "ash_unittests": TestAsh,552    "aura": TestAura,            "aura_unittests": TestAura,553    "base": TestBase,            "base_unittests": TestBase,554    "blink_heap": TestBlinkHeap,555    "blink_platform": TestBlinkPlatform,556    "browser": TestBrowser,      "browser_tests": TestBrowser,557    "cacheinvalidation": TestCacheInvalidation,558    "cacheinvalidation_unittests": TestCacheInvalidation,559    "cast": TestCast,            "cast_unittests": TestCast,560    "cc": TestCC,                "cc_unittests": TestCC,561    "chrome_app": TestChromeApp,562    "chrome_elf": TestChromeElf,563    "chromedriver": TestChromeDriver,564    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,565    "components": TestComponents,"components_unittests": 
TestComponents,566    "compositor": TestCompositor,"compositor_unittests": TestCompositor,567    "content": TestContent,      "content_unittests": TestContent,568    "content_browsertests": TestContentBrowser,569    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,570    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,571    "device": TestDevice,        "device_unittests": TestDevice,572    "display": TestDisplay,      "display_unittests": TestDisplay,573    "events": TestEvents,        "events_unittests": TestEvents,574    "extensions": TestExtensions, "extensions_unittests": TestExtensions,575    "ffmpeg_regression_tests": TestFFmpegRegressions,576    "gcm": TestGCM,              "gcm_unit_tests": TestGCM,577    "gin": TestGin,              "gin_unittests": TestGin,578    "gfx": TestGfx,              "gfx_unittests": TestGfx,579    "google_apis": TestGoogleApis,580    "gpu": TestGPU,              "gpu_unittests": TestGPU,581    "ipc": TestIpc,              "ipc_tests": TestIpc,582    "installer_util": TestInstallerUtil,583    "installer_util_unittests": TestInstallerUtil,584    "install_static_unittests": TestInstallStatic,585    "interactive_ui": TestInteractiveUI,586    "jingle": TestJingle,        "jingle_unittests": TestJingle,587    "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,588    "latency": TestLatency,      "latency_unittests": TestLatency,589    "layout": TestLayout,        "layout_tests": TestLayout,590    "media": TestMedia,          "media_unittests": TestMedia,591    "message_center": TestMessageCenter,592    "message_center_unittests" : TestMessageCenter,593    "midi": TestMidi,             "midi_unittests": TestMidi,594    "mojo_common": TestMojoCommon,595    "mojo_common_unittests": TestMojoCommon,596    "mojo_system": TestMojoSystem,597    "mojo_system_unittests": TestMojoSystem,598    "mojo_public_system": TestMojoPublicSystem,599    "mojo_public_system_unittests": TestMojoPublicSystem,600    
"mojo_public_bindings": TestMojoPublicBindings,601    "mojo_public_bindings_unittests": TestMojoPublicBindings,602    "mojo_public_sysperf": TestMojoPublicSysPerf,603    "net": TestNet,              "net_unittests": TestNet,604    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,605    "phonenumber": TestPhoneNumber,606    "libphonenumber_unittests": TestPhoneNumber,607    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,608    "printing": TestPrinting,    "printing_unittests": TestPrinting,609    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,610    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,611    "skia": TestSkia,            "skia_unittests": TestSkia,612    "sql": TestSql,              "sql_unittests": TestSql,613    "storage": TestStorage,      "storage_unittests": TestStorage,614    "sync_integration_tests": TestSyncIntegration,615    "sync_integration": TestSyncIntegration,616    "ui_base_unit": TestUIBaseUnit,       "ui_base_unittests": TestUIBaseUnit,617    "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,618    "unit": TestUnit,            "unit_tests": TestUnit,619    "url": TestURL,              "url_unittests": TestURL,620    "views": TestViews,          "views_unittests": TestViews,621    "webkit": TestLayout,622  }623def _main():624  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "625                                 "[-t <test> ...]")626  parser.add_option("--help-tests", dest="help_tests", action="store_true",627                    default=False, help="List all available tests")628  parser.add_option("-b", "--build-dir",629                    help="the location of the compiler output")630  parser.add_option("--target", help="Debug or Release")631  parser.add_option("-t", "--test", action="append", default=[],632                    help="which test to run, supports test:gtest_filter format "633                         "as well.")634  
parser.add_option("--baseline", action="store_true", default=False,635                    help="generate baseline data instead of validating")636  parser.add_option("-f", "--force", action="store_true", default=False,637                    help="run a broken test anyway")638  parser.add_option("--gtest_filter",639                    help="additional arguments to --gtest_filter")640  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")641  parser.add_option("--gtest_shuffle", action="store_true", default=False,642                    help="Randomize tests' orders on every iteration.")643  parser.add_option("--gtest_break_on_failure", action="store_true",644                    default=False,645                    help="Drop in to debugger on assertion failure. Also "646                         "useful for forcing tests to exit with a stack dump "647                         "on the first assertion failure when running with "648                         "--gtest_repeat=-1")649  parser.add_option("-v", "--verbose", action="store_true", default=False,650                    help="verbose output - enable debug log messages")651  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",652                    help="specify a valgrind tool to run the tests under")653  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",654                    help="specify custom flags for the selected valgrind tool")655  parser.add_option("--keep_logs", action="store_true", default=False,656                    help="store memory tool logs in the <tool>.logs directory "657                         "instead of /tmp.\nThis can be useful for tool "658                         "developers/maintainers.\nPlease note that the <tool>"659                         ".logs directory will be clobbered on tool startup.")660  parser.add_option("-n", "--num_tests", type="int",661                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,662           
         help="for layout tests: # of subtests per run.  0 for all.")663  parser.add_option("--test-launcher-bot-mode", action="store_true",664                    help="run the tests with --test-launcher-bot-mode")665  parser.add_option("--test-launcher-total-shards", type=int,666                    help="run the tests with --test-launcher-total-shards")667  parser.add_option("--test-launcher-shard-index", type=int,668                    help="run the tests with --test-launcher-shard-index")669  parser.add_option("--drmemory_ops",670                    help="extra options passed to Dr. Memory")671  options, args = parser.parse_args()672  # Bake target into build_dir.673  if options.target and options.build_dir:674    assert (options.target !=675            os.path.basename(os.path.dirname(options.build_dir)))676    options.build_dir = os.path.join(os.path.abspath(options.build_dir),677                                     options.target)678  if options.verbose:679    logging_utils.config_root(logging.DEBUG)680  else:681    logging_utils.config_root()682  if options.help_tests:683    ChromeTests.ShowTests()684    return 0685  if not options.test:686    parser.error("--test not specified")687  if len(options.test) != 1 and options.gtest_filter:688    parser.error("--gtest_filter and multiple tests don't make sense together")689  BROKEN_TESTS = {690    'drmemory_light': [691      'addressinput',692      'aura',693      'base_unittests',694      'cc',695      'components', # x64 only?696      'content',697      'gfx',698      'mojo_public_bindings',699    ],700    'drmemory_full': [701      'addressinput',702      'aura',703      'base_unittests',704      'blink_heap',705      'blink_platform',706      'browser_tests',707      'cast',708      'cc',709      'chromedriver',710      'compositor',711      'content',712      'content_browsertests',713      'device',714      'events',715      'extensions',716      'gfx',717      'google_apis',718      'gpu',719      
'ipc_tests',720      'jingle',721      'keyboard',722      'media',723      'midi',724      'mojo_common',725      'mojo_public_bindings',726      'mojo_public_sysperf',727      'mojo_public_system',728      'mojo_system',729      'net',730      'remoting',731      'unit',732      'url',733    ],734  }735  for t in options.test:736    if t in BROKEN_TESTS[options.valgrind_tool] and not options.force:737      logging.info("Skipping broken %s test %s -- see crbug.com/633693" %738                   (options.valgrind_tool, t))739      return 0740    tests = ChromeTests(options, args, t)741    ret = tests.Run()742    if ret: return ret743  return 0744if __name__ == "__main__":...Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!
