How to use the _build_config method in Slash


cbuildbot_stages.py

Source: cbuildbot_stages.py (GitHub)
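Note that in the source below, _build_config is not called as a method; it is a dict-like build configuration that bs.BuilderStage.__init__ stores on every stage object, and stage code indexes it by key (for example, self._build_config['internal'] in ArchivingStage.UploadArtifact, or self._build_config['manifest'] in SyncStage.GetNextManifest). The following minimal, self-contained sketch illustrates that access pattern; the ExampleStage class and the literal config values are hypothetical and not part of the original file.

# Hypothetical stand-in for bs.BuilderStage (illustration only): it stores
# the build_config dict so that stage methods can read settings from it.
class ExampleStage(object):
  def __init__(self, build_config):
    self._build_config = build_config
  def PerformStage(self):
    # Mirrors ArchivingStage.UploadArtifact below: internal builds get no
    # public-read ACL.
    acl = None if self._build_config['internal'] else 'public-read'
    print 'manifest=%s acl=%s' % (self._build_config['manifest'], acl)

# Invented config values, for illustration only.
ExampleStage({'internal': False, 'manifest': 'default.xml'}).PerformStage()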


# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the various stages that a builder runs."""

import contextlib
import datetime
import functools
import glob
import json
import logging
import math
import multiprocessing
import os
import Queue
import shutil
import sys

from chromite.buildbot import builderstage as bs
from chromite.buildbot import cbuildbot_commands as commands
from chromite.buildbot import cbuildbot_config
from chromite.buildbot import cbuildbot_results as results_lib
from chromite.buildbot import constants
from chromite.buildbot import lab_status
from chromite.buildbot import lkgm_manager
from chromite.buildbot import manifest_version
from chromite.buildbot import portage_utilities
from chromite.buildbot import repository
from chromite.buildbot import trybot_patch_pool
from chromite.buildbot import validation_pool
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import gs
from chromite.lib import toolchain
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import patch as cros_patch

_FULL_BINHOST = 'FULL_BINHOST'
_PORTAGE_BINHOST = 'PORTAGE_BINHOST'
_CROS_ARCHIVE_URL = 'CROS_ARCHIVE_URL'
_PRINT_INTERVAL = 1
_VM_TEST_ERROR_MSG = """
!!!VMTests failed!!!
Logs are uploaded in the corresponding vm_test_results.tgz. This can be found
by clicking on the artifacts link in the "Report" Stage. Specifically look
for the test_harness/failed for the failing tests. For more
particulars, please refer to which test failed i.e. above see the
individual test that failed -- or if an update failed, check the
corresponding update directory.
"""

class NonHaltingBuilderStage(bs.BuilderStage):
  """Build stage that fails a build but finishes the other steps."""
  def Run(self):
    try:
      super(NonHaltingBuilderStage, self).Run()
    except results_lib.StepFailure:
      name = self.__class__.__name__
      cros_build_lib.Error('Ignoring StepFailure in %s', name)

class ForgivingBuilderStage(bs.BuilderStage):
  """Build stage that turns a build step red but not a build."""
  def _HandleStageException(self, exception):
    """Override and don't set status to FAIL but FORGIVEN instead."""
    return self._HandleExceptionAsWarning(exception)

class BoardSpecificBuilderStage(bs.BuilderStage):
  def __init__(self, options, build_config, board, suffix=None):
    super(BoardSpecificBuilderStage, self).__init__(options, build_config,
                                                    suffix)
    self._current_board = board
    if not isinstance(board, basestring):
      raise TypeError('Expected string, got %r' % (board,))
    # Add a board name suffix to differentiate between various boards (in case
    # more than one board is built on a single builder.)
    if len(self._boards) > 1 or build_config['grouped']:
      self.name = '%s [%s]' % (self.name, board)
  def GetImageDirSymlink(self, pointer='latest-cbuildbot'):
    """Get the location of the current image."""
    buildroot, board = self._options.buildroot, self._current_board
    return os.path.join(buildroot, 'src', 'build', 'images', board, pointer)

class ArchivingStage(BoardSpecificBuilderStage):
  """Helper for stages that archive files.
  Attributes:
    archive_stage: The ArchiveStage instance for this board.
    bot_archive_root: The root path where output from this builder is stored.
    download_url: The URL where we can download artifacts.
    upload_url: The Google Storage location where we should upload artifacts.
  """
  PROCESSES = 10
  _BUILDBOT_ARCHIVE = 'buildbot_archive'
  _TRYBOT_ARCHIVE = 'trybot_archive'
  @classmethod
  def GetArchiveRoot(cls, buildroot, trybot=False):
    """Return the location where archive images are kept."""
    archive_base = cls._TRYBOT_ARCHIVE if trybot else cls._BUILDBOT_ARCHIVE
    return os.path.join(buildroot, archive_base)
  def __init__(self, options, build_config, board, archive_stage, suffix=None):
    super(ArchivingStage, self).__init__(options, build_config, board,
                                         suffix=suffix)
    self.archive_stage = archive_stage
    if options.remote_trybot:
      self.debug = options.debug_forced
    else:
      self.debug = options.debug
    self.version = archive_stage.GetVersion()
    gsutil_archive = self._GetGSUtilArchiveDir()
    self.upload_url = '%s/%s' % (gsutil_archive, self.version)
    trybot = not options.buildbot or options.debug
    archive_root = ArchivingStage.GetArchiveRoot(self._build_root, trybot)
    self.bot_archive_root = os.path.join(archive_root, self._bot_id)
    self.archive_path = os.path.join(self.bot_archive_root, self.version)
    if options.buildbot or options.remote_trybot:
      base_download_url = gs.PRIVATE_BASE_HTTPS_URL
      self.download_url = self.upload_url.replace('gs://', base_download_url)
    else:
      self.download_url = self.archive_path
  @contextlib.contextmanager
  def ArtifactUploader(self, queue=None, archive=True, strict=True):
    """Upload each queued input in the background.
    This context manager starts a set of workers in the background, who each
    wait for input on the specified queue. These workers run
    self.UploadArtifact(*args, archive=archive) for each input in the queue.
    Arguments:
      queue: Queue to use. Add artifacts to this queue, and they will be
        uploaded in the background. If None, one will be created on the fly.
      archive: Whether to automatically copy files to the archive dir.
      strict: Whether to treat upload errors as fatal.
    Returns:
      The queue to use. This is only useful if you did not supply a queue.
    """
    upload = lambda path: self.UploadArtifact(path, archive, strict)
    with parallel.BackgroundTaskRunner(upload, queue=queue,
                                       processes=self.PROCESSES) as bg_queue:
      yield bg_queue
  def PrintDownloadLink(self, filename, prefix=''):
    """Print a link to an artifact in Google Storage.
    Args:
      filename: The filename of the uploaded file.
      prefix: The prefix to put in front of the filename.
    """
    url = '%s/%s' % (self.download_url.rstrip('/'), filename)
    cros_build_lib.PrintBuildbotLink(prefix + filename, url)
  def UploadArtifact(self, path, archive=True, strict=True):
    """Upload generated artifact to Google Storage.
    Arguments:
      path: Path of local file to upload to Google Storage.
      archive: Whether to automatically copy files to the archive dir.
      strict: Whether to treat upload errors as fatal.
    """
    acl = None if self._build_config['internal'] else 'public-read'
    filename = path
    if archive:
      filename = commands.ArchiveFile(path, self.archive_path)
    try:
      commands.UploadArchivedFile(self.archive_path, self.upload_url, filename,
                                  self.debug, update_list=True, acl=acl)
    except cros_build_lib.RunCommandError as e:
      cros_build_lib.PrintBuildbotStepText('Upload failed')
      if strict:
        raise
      # Treat gsutil flake as a warning if it's the only problem.
      self._HandleExceptionAsWarning(e)
  def _GetGSUtilArchiveDir(self):
    if self._options.archive_base:
      gs_base = self._options.archive_base
    elif (self._options.remote_trybot or
          self._build_config['gs_path'] == cbuildbot_config.GS_PATH_DEFAULT):
      gs_base = constants.DEFAULT_ARCHIVE_BUCKET
    else:
      return self._build_config['gs_path']
    return '%s/%s' % (gs_base, self._bot_id)

class CleanUpStage(bs.BuilderStage):
  """Stage that cleans up build artifacts from previous runs.
  This stage cleans up previous KVM state, temporary git commits,
  clobbers, and wipes tmp inside the chroot.
  """
  option_name = 'clean'
  def _CleanChroot(self):
    commands.CleanupChromeKeywordsFile(self._boards,
                                       self._build_root)
    chroot_tmpdir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR,
                                 'tmp')
    if os.path.exists(chroot_tmpdir):
      cros_build_lib.SudoRunCommand(['rm', '-rf', chroot_tmpdir],
                                    print_cmd=False)
      cros_build_lib.SudoRunCommand(['mkdir', '--mode', '1777', chroot_tmpdir],
                                    print_cmd=False)
  def _DeleteChroot(self):
    chroot = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
    if os.path.exists(chroot):
      cros_build_lib.RunCommand(['cros_sdk', '--delete', '--chroot', chroot],
                                self._build_root,
                                cwd=self._build_root)
  def _DeleteArchivedTrybotImages(self):
    """For trybots, clear all previous archive images to save space."""
    archive_root = ArchivingStage.GetArchiveRoot(self._build_root, trybot=True)
    shutil.rmtree(archive_root, ignore_errors=True)
  def _DeleteArchivedPerfResults(self):
    """Clear any previously stashed perf results from hw testing."""
    for result in glob.glob(os.path.join(
        self._options.log_dir, '*.%s' % HWTestStage.PERF_RESULTS_EXTENSION)):
      os.remove(result)
  def PerformStage(self):
    if (not (self._options.buildbot or self._options.remote_trybot)
        and self._options.clobber):
      if not commands.ValidateClobber(self._build_root):
        cros_build_lib.Die("--clobber in local mode must be approved.")
    # If we can't get a manifest out of it, then it's not usable and must be
    # clobbered.
    manifest = None
    if not self._options.clobber:
      try:
        manifest = git.ManifestCheckout.Cached(self._build_root, search=False)
      except (KeyboardInterrupt, MemoryError, SystemExit):
        raise
      except Exception, e:
        # Either there is no repo there, or the manifest isn't usable. If the
        # directory exists, log the exception for debugging reasons. Either
        # way, the checkout needs to be wiped since it's in an unknown
        # state.
        if os.path.exists(self._build_root):
          cros_build_lib.Warning("ManifestCheckout at %s is unusable: %s",
                                 self._build_root, e)
    if manifest is None:
      self._DeleteChroot()
      repository.ClearBuildRoot(self._build_root, self._options.preserve_paths)
    else:
      # Clean mount points first to be safe about deleting.
      commands.CleanUpMountPoints(self._build_root)
      commands.BuildRootGitCleanup(self._build_root, self._options.debug)
      tasks = [functools.partial(commands.BuildRootGitCleanup,
                                 self._build_root, self._options.debug),
               functools.partial(commands.WipeOldOutput, self._build_root),
               self._DeleteArchivedTrybotImages,
               self._DeleteArchivedPerfResults]
      if self._build_config['chroot_replace'] and self._options.build:
        tasks.append(self._DeleteChroot)
      else:
        tasks.append(self._CleanChroot)
      parallel.RunParallelSteps(tasks)

class PatchChangesStage(bs.BuilderStage):
  """Stage that patches a set of Gerrit changes to the buildroot source tree."""
  def __init__(self, options, build_config, patch_pool):
    """Construct a PatchChangesStage.
    Args:
      options, build_config: See arguments to bs.BuilderStage.__init__()
      patch_pool: A TrybotPatchPool object containing the different types of
        patches to apply.
    """
    bs.BuilderStage.__init__(self, options, build_config)
    self.patch_pool = patch_pool
  @staticmethod
  def _CheckForDuplicatePatches(_series, changes):
    conflicts = {}
    duplicates = []
    for change in changes:
      if change.id is None:
        cros_build_lib.Warning(
            "Change %s lacks a usable ChangeId; duplicate checking cannot "
            "be done for this change. If cherry-picking fails, this is a "
            "potential cause.", change)
        continue
      conflicts.setdefault(change.id, []).append(change)
    duplicates = [x for x in conflicts.itervalues() if len(x) > 1]
    if not duplicates:
      return changes
    for conflict in duplicates:
      cros_build_lib.Error(
          "Changes %s conflict with each other - they have the same id %s.",
          ', '.join(map(str, conflict)), conflict[0].id)
    cros_build_lib.Die("Duplicate patches were encountered: %s", duplicates)
  @staticmethod
  def _FixIncompleteRemotePatches(series, changes):
    """Identify missing remote patches from older cbuildbot instances.
    Cbuildbot, prior to I8ab6790de801900c115a437b5f4ebb9a24db542f, uploaded
    a single patch per project, even if there may have been a hundred
    patches actually pulled in by that patch. This method detects when
    we're dealing w/ the old incomplete version, and fills in those gaps."""
    broken = [x for x in changes
              if isinstance(x, cros_patch.UploadedLocalPatch)]
    if not broken:
      return changes
    changes = list(changes)
    known = cros_patch.PatchCache(changes)
    for change in broken:
      git_repo = series.GetGitRepoForChange(change)
      tracking = series.GetTrackingBranchForChange(change)
      branch = getattr(change, 'original_branch', tracking)
      for target in cros_patch.GeneratePatchesFromRepo(
          git_repo, change.project, tracking, branch, change.internal,
          allow_empty=True, starting_ref='%s^' % change.sha1):
        if target in known:
          continue
        known.Inject(target)
        changes.append(target)
    return changes
  def _PatchSeriesFilter(self, series, changes):
    if self._options.remote_version == 3:
      changes = self._FixIncompleteRemotePatches(series, changes)
    return self._CheckForDuplicatePatches(series, changes)
  def _ApplyPatchSeries(self, series, patch_pool, **kwargs):
    """Applies a patch pool using a patch series."""
    kwargs.setdefault('frozen', False)
    # Honor the given ordering, so that if a gerrit/remote patch
    # conflicts w/ a local patch, the gerrit/remote patch are
    # blamed rather than local (patch ordering is typically
    # local, gerrit, then remote).
    kwargs.setdefault('honor_ordering', True)
    kwargs['changes_filter'] = self._PatchSeriesFilter
    _applied, failed_tot, failed_inflight = series.Apply(
        list(patch_pool), **kwargs)
    failures = failed_tot + failed_inflight
    if failures:
      cros_build_lib.Die("Failed applying patches: %s",
                         "\n".join(map(str, failures)))
  def PerformStage(self):
    class NoisyPatchSeries(validation_pool.PatchSeries):
      """Custom PatchSeries that adds links to buildbot logs for remote trys."""
      def ApplyChange(self, change, dryrun=False):
        if isinstance(change, cros_patch.GerritPatch):
          cros_build_lib.PrintBuildbotLink(str(change), change.url)
        elif isinstance(change, cros_patch.UploadedLocalPatch):
          cros_build_lib.PrintBuildbotStepText(str(change))
        return validation_pool.PatchSeries.ApplyChange(self, change,
                                                       dryrun=dryrun)
    # If we're an external builder, ignore internal patches.
    helper_pool = validation_pool.HelperPool.SimpleCreate(
        cros_internal=self._build_config['internal'], cros=True)
    # Limit our resolution to non-manifest patches.
    patch_series = NoisyPatchSeries(
        self._build_root,
        force_content_merging=True,
        helper_pool=helper_pool,
        deps_filter_fn=lambda p: not trybot_patch_pool.ManifestFilter(p))
    self._ApplyPatchSeries(patch_series, self.patch_pool)

class BootstrapStage(PatchChangesStage):
  """Stage that patches a chromite repo and re-executes inside it.
  Attributes:
    returncode - the returncode of the cbuildbot re-execution. Valid after
      calling stage.Run().
  """
  option_name = 'bootstrap'
  def __init__(self, options, build_config, chromite_patch_pool,
               manifest_patch_pool=None):
    super(BootstrapStage, self).__init__(
        options, build_config, trybot_patch_pool.TrybotPatchPool())
    self.chromite_patch_pool = chromite_patch_pool
    self.manifest_patch_pool = manifest_patch_pool
    self.returncode = None
  def _ApplyManifestPatches(self, patch_pool):
    """Apply a pool of manifest patches to a temp manifest checkout.
    Arguments:
      patch_pool: The pool of manifest patches to apply.
    Returns:
      The path to the patched manifest checkout.
    Raises:
      Exception, if the new patched manifest cannot be parsed.
    """
    checkout_dir = os.path.join(self.tempdir, 'manfest-checkout')
    repository.CloneGitRepo(checkout_dir,
                            self._build_config['manifest_repo_url'])
    patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
        checkout_dir, deps_filter_fn=trybot_patch_pool.ManifestFilter,
        tracking_branch=self._target_manifest_branch)
    self._ApplyPatchSeries(patch_series, patch_pool)
    # Create the branch that 'repo init -b <target_branch> -u <patched_repo>'
    # will look for.
    cmd = ['branch', '-f', self._target_manifest_branch, constants.PATCH_BRANCH]
    git.RunGit(checkout_dir, cmd)
    # Verify that the patched manifest loads properly. Propagate any errors as
    # exceptions.
    manifest = os.path.join(checkout_dir, self._build_config['manifest'])
    git.Manifest.Cached(manifest, manifest_include_dir=checkout_dir)
    return checkout_dir
  @staticmethod
  def _FilterArgsForApi(parsed_args, api_minor):
    """Remove arguments that are introduced after an api version."""
    def filter_fn(passed_arg):
      return passed_arg.opt_inst.api_version <= api_minor
    accepted, removed = commandline.FilteringParser.FilterArgs(
        parsed_args, filter_fn)
    if removed:
      cros_build_lib.Warning('The following arguments were removed due to api: '
                             "'%s'" % ' '.join(removed))
    return accepted
  @classmethod
  def FilterArgsForTargetCbuildbot(cls, buildroot, cbuildbot_path, options):
    _, minor = cros_build_lib.GetTargetChromiteApiVersion(buildroot)
    args = [cbuildbot_path]
    args.extend(options.build_targets)
    args.extend(cls._FilterArgsForApi(options.parsed_args, minor))
    # Only pass down --cache-dir if it was specified. By default, we want
    # the cache dir to live in the root of each checkout, so this means that
    # each instance of cbuildbot needs to calculate the default separately.
    if minor >= 2 and options.cache_dir_specified:
      args += ['--cache-dir', options.cache_dir]
    return args
  #pylint: disable=E1101
  @osutils.TempDirDecorator
  def PerformStage(self):
    # The plan for the builders is to use master branch to bootstrap other
    # branches. Now, if we wanted to test patches for both the bootstrap code
    # (on master) and the branched chromite (say, R20), we need to filter the
    # patches by branch.
    filter_branch = self._target_manifest_branch
    if self._options.test_bootstrap:
      filter_branch = 'master'
    chromite_dir = os.path.join(self.tempdir, 'chromite')
    reference_repo = os.path.join(constants.SOURCE_ROOT, 'chromite', '.git')
    repository.CloneGitRepo(chromite_dir, constants.CHROMITE_URL,
                            reference=reference_repo)
    git.RunGit(chromite_dir, ['checkout', filter_branch])
    def BranchAndChromiteFilter(patch):
      return (trybot_patch_pool.BranchFilter(filter_branch, patch) and
              trybot_patch_pool.ChromiteFilter(patch))
    patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
        chromite_dir, filter_branch,
        deps_filter_fn=BranchAndChromiteFilter)
    filtered_pool = self.chromite_patch_pool.FilterBranch(filter_branch)
    if filtered_pool:
      self._ApplyPatchSeries(patch_series, filtered_pool)
    cbuildbot_path = constants.PATH_TO_CBUILDBOT
    if not os.path.exists(os.path.join(self.tempdir, cbuildbot_path)):
      cbuildbot_path = 'chromite/buildbot/cbuildbot'
    cmd = self.FilterArgsForTargetCbuildbot(self.tempdir, cbuildbot_path,
                                            self._options)
    extra_params = ['--sourceroot=%s' % self._options.sourceroot]
    extra_params.extend(self._options.bootstrap_args)
    if self._options.test_bootstrap:
      # We don't want the re-executed instance to see this.
      cmd = [a for a in cmd if a != '--test-bootstrap']
    else:
      # If we've already done the desired number of bootstraps, disable
      # bootstrapping for the next execution. Also pass in the patched manifest
      # repository.
      extra_params.append('--nobootstrap')
      if self.manifest_patch_pool:
        manifest_dir = self._ApplyManifestPatches(self.manifest_patch_pool)
        extra_params.extend(['--manifest-repo-url', manifest_dir])
    cmd += extra_params
    result_obj = cros_build_lib.RunCommand(
        cmd, cwd=self.tempdir, kill_timeout=30, error_code_ok=True)
    self.returncode = result_obj.returncode

class SyncStage(bs.BuilderStage):
  """Stage that performs syncing for the builder."""
  option_name = 'sync'
  output_manifest_sha1 = True
  def __init__(self, options, build_config):
    super(SyncStage, self).__init__(options, build_config)
    self.repo = None
    self.skip_sync = False
    self.internal = self._build_config['internal']
  def _GetManifestVersionsRepoUrl(self, read_only=False):
    return cbuildbot_config.GetManifestVersionsRepoUrl(
        self.internal,
        read_only=read_only)
  def Initialize(self):
    self._InitializeRepo()
  def _InitializeRepo(self, build_root=None, **kwds):
    if build_root is None:
      build_root = self._build_root
    manifest_url = self._options.manifest_repo_url
    if manifest_url is None:
      manifest_url = self._build_config['manifest_repo_url']
    kwds.setdefault('referenced_repo', self._options.reference_repo)
    kwds.setdefault('branch', self._target_manifest_branch)
    kwds.setdefault('manifest', self._build_config['manifest'])
    self.repo = repository.RepoRepository(manifest_url, build_root, **kwds)
  def GetNextManifest(self):
    """Returns the manifest to use."""
    return self._build_config['manifest']
  def ManifestCheckout(self, next_manifest):
    """Checks out the repository to the given manifest."""
    self._Print('\n'.join(['BUILDROOT: %s' % self.repo.directory,
                           'TRACKING BRANCH: %s' % self.repo.branch,
                           'NEXT MANIFEST: %s' % next_manifest]))
    if not self.skip_sync:
      self.repo.Sync(next_manifest)
    print >> sys.stderr, self.repo.ExportManifest(
        mark_revision=self.output_manifest_sha1)
  def PerformStage(self):
    self.Initialize()
    with osutils.TempDir() as tempdir:
      # Save off the last manifest.
      fresh_sync = True
      if os.path.exists(self.repo.directory) and not self._options.clobber:
        old_filename = os.path.join(tempdir, 'old.xml')
        try:
          old_contents = self.repo.ExportManifest()
        except cros_build_lib.RunCommandError as e:
          cros_build_lib.Warning(str(e))
        else:
          osutils.WriteFile(old_filename, old_contents)
          fresh_sync = False
      # Sync.
      self.ManifestCheckout(self.GetNextManifest())
      # Print the blamelist.
      if fresh_sync:
        cros_build_lib.PrintBuildbotStepText('(From scratch)')
      elif self._options.buildbot:
        lkgm_manager.GenerateBlameList(self.repo, old_filename)

class LKGMSyncStage(SyncStage):
  """Stage that syncs to the last known good manifest blessed by builders."""
  output_manifest_sha1 = False
  def GetNextManifest(self):
    """Override: Gets the LKGM."""
    # TODO(sosa): Should really use an initialized manager here.
    if self.internal:
      mv_dir = 'manifest-versions-internal'
    else:
      mv_dir = 'manifest-versions'
    manifest_path = os.path.join(self._build_root, mv_dir)
    manifest_repo = self._GetManifestVersionsRepoUrl(read_only=True)
    manifest_version.RefreshManifestCheckout(manifest_path, manifest_repo)
    return os.path.join(manifest_path, lkgm_manager.LKGMManager.LKGM_PATH)

class ChromeLKGMSyncStage(SyncStage):
  """Stage that syncs to the last known good manifest for Chrome."""
  output_manifest_sha1 = False
  def GetNextManifest(self):
    """Override: Gets the LKGM from the Chrome tree."""
    chrome_lkgm = commands.GetChromeLKGM(self._options.chrome_version)
    # We need a full buildspecs manager here as we need an initialized manifest
    # manager with paths to the spec.
    manifest_manager = manifest_version.BuildSpecsManager(
        source_repo=self.repo,
        manifest_repo=self._GetManifestVersionsRepoUrl(read_only=False),
        build_name=self._bot_id,
        incr_type='build',
        force=False,
        branch=self._target_manifest_branch)
    manifest_manager.BootstrapFromVersion(chrome_lkgm)
    return manifest_manager.GetLocalManifest(chrome_lkgm)

class ManifestVersionedSyncStage(SyncStage):
  """Stage that generates a unique manifest file, and sync's to it."""
  manifest_manager = None
  output_manifest_sha1 = False
  def __init__(self, options, build_config):
    # Perform the sync at the end of the stage to the given manifest.
    super(ManifestVersionedSyncStage, self).__init__(options, build_config)
    self.repo = None
    # If a builder pushes changes (even with dryrun mode), we need a writable
    # repository. Otherwise, the push will be rejected by the server.
    self.manifest_repo = self._GetManifestVersionsRepoUrl(read_only=False)
    # 1. If we're uprevving Chrome, Chrome might have changed even if the
    #    manifest has not, so we should force a build to double check. This
    #    means that we'll create a new manifest, even if there are no changes.
    # 2. If we're running with --debug, we should always run through to
    #    completion, so as to ensure a complete test.
    self._force = self._chrome_rev or options.debug
  def HandleSkip(self):
    """Initializes a manifest manager to the specified version if skipped."""
    super(ManifestVersionedSyncStage, self).HandleSkip()
    if self._options.force_version:
      self.Initialize()
      self.ForceVersion(self._options.force_version)
  def ForceVersion(self, version):
    """Creates a manifest manager from given version and returns manifest."""
    return ManifestVersionedSyncStage.manifest_manager.BootstrapFromVersion(
        version)
  def Initialize(self):
    """Initializes a manager that manages manifests for associated stages."""
    increment = ('build' if self._target_manifest_branch == 'master'
                 else 'branch')
    dry_run = self._options.debug
    self._InitializeRepo()
    # If chrome_rev is somehow set, fail.
    assert not self._chrome_rev, \
        'chrome_rev is unsupported on release builders.'
    ManifestVersionedSyncStage.manifest_manager = \
        manifest_version.BuildSpecsManager(
            source_repo=self.repo,
            manifest_repo=self.manifest_repo,
            manifest=self._build_config['manifest'],
            build_name=self._bot_id,
            incr_type=increment,
            force=self._force,
            branch=self._target_manifest_branch,
            dry_run=dry_run,
            master=self._build_config['master'])
  def GetNextManifest(self):
    """Uses the initialized manifest manager to get the next manifest."""
    assert self.manifest_manager, \
        'Must run GetStageManager before checking out the build.'
    to_return = self.manifest_manager.GetNextBuildSpec()
    previous_version = self.manifest_manager.GetLatestPassingSpec()
    target_version = self.manifest_manager.current_version
    # Print the Blamelist here.
    url_prefix = 'http://chromeos-images.corp.google.com/diff/report?'
    url = url_prefix + 'from=%s&to=%s' % (previous_version, target_version)
    cros_build_lib.PrintBuildbotLink('Blamelist', url)
    return to_return
  def PerformStage(self):
    self.Initialize()
    if self._options.force_version:
      next_manifest = self.ForceVersion(self._options.force_version)
    else:
      next_manifest = self.GetNextManifest()
    if not next_manifest:
      cros_build_lib.Info('Found no work to do.')
      if ManifestVersionedSyncStage.manifest_manager.DidLastBuildFail():
        raise results_lib.StepFailure('The previous build failed.')
      else:
        sys.exit(0)
    # Log this early on for the release team to grep out before we finish.
    if ManifestVersionedSyncStage.manifest_manager:
      self._Print('\nRELEASETAG: %s\n' % (
          ManifestVersionedSyncStage.manifest_manager.current_version))
    self.ManifestCheckout(next_manifest)

class LKGMCandidateSyncStage(ManifestVersionedSyncStage):
  """Stage that generates a unique manifest file candidate, and sync's to it."""
  sub_manager = None
  def __init__(self, options, build_config):
    super(LKGMCandidateSyncStage, self).__init__(options, build_config)
    # lkgm_manager deals with making sure we're synced to whatever manifest
    # we get back in GetNextManifest so syncing again is redundant.
    self.skip_sync = True
  def _GetInitializedManager(self, internal):
    """Returns an initialized lkgm manager."""
    increment = ('build' if self._target_manifest_branch == 'master'
                 else 'branch')
    return lkgm_manager.LKGMManager(
        source_repo=self.repo,
        manifest_repo=cbuildbot_config.GetManifestVersionsRepoUrl(
            internal, read_only=False),
        manifest=self._build_config['manifest'],
        build_name=self._bot_id,
        build_type=self._build_config['build_type'],
        incr_type=increment,
        force=self._force,
        branch=self._target_manifest_branch,
        dry_run=self._options.debug,
        master=self._build_config['master'])
  def Initialize(self):
    """Override: Creates an LKGMManager rather than a ManifestManager."""
    self._InitializeRepo()
    ManifestVersionedSyncStage.manifest_manager = self._GetInitializedManager(
        self.internal)
    if (self._build_config['master'] and
        self._GetSlavesForMaster(self._build_config)):
      assert self.internal, 'Unified masters must use an internal checkout.'
      LKGMCandidateSyncStage.sub_manager = self._GetInitializedManager(False)
  def ForceVersion(self, version):
    manifest = super(LKGMCandidateSyncStage, self).ForceVersion(version)
    if LKGMCandidateSyncStage.sub_manager:
      LKGMCandidateSyncStage.sub_manager.BootstrapFromVersion(version)
    return manifest
  def GetNextManifest(self):
    """Gets the next manifest using LKGM logic."""
    assert self.manifest_manager, \
        'Must run Initialize before we can get a manifest.'
    assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
        'Manifest manager instantiated with wrong class.'
    if self._build_config['master']:
      manifest = self.manifest_manager.CreateNewCandidate()
      if LKGMCandidateSyncStage.sub_manager:
        LKGMCandidateSyncStage.sub_manager.CreateFromManifest(manifest)
      return manifest
    else:
      return self.manifest_manager.GetLatestCandidate()

class CommitQueueSyncStage(LKGMCandidateSyncStage):
  """Commit Queue Sync stage that handles syncing and applying patches.
  This stage handles syncing to a manifest, passing around that manifest to
  other builders and finding the Gerrit Reviews ready to be committed and
  applying them into its own checkout.
  """
  def __init__(self, options, build_config):
    super(CommitQueueSyncStage, self).__init__(options, build_config)
    # Figure out the builder's name from the buildbot waterfall.
    builder_name = build_config['paladin_builder_name']
    self.builder_name = builder_name if builder_name else build_config['name']
    # The pool of patches to be picked up by the commit queue.
    # - For the master commit queue, it's initialized in GetNextManifest.
    # - For slave commit queues, it's initialized in SetPoolFromManifest.
    #
    # In all cases, the pool is saved to disk, and refreshed after bootstrapping
    # by HandleSkip.
    self.pool = None
  def HandleSkip(self):
    """Handles skip and initializes validation pool from manifest."""
    super(CommitQueueSyncStage, self).HandleSkip()
    filename = self._options.validation_pool
    if filename:
      self.pool = validation_pool.ValidationPool.Load(filename)
    else:
      self.SetPoolFromManifest(self.manifest_manager.GetLocalManifest())
  def ChangeFilter(self, pool, changes, non_manifest_changes):
    # First, look for changes that were tested by the Pre-CQ.
    changes_to_test = []
    for change in changes:
      status = pool.GetPreCQStatus(change)
      if status == manifest_version.BuilderStatus.STATUS_PASSED:
        changes_to_test.append(change)
    # If we only see changes that weren't verified by Pre-CQ, try all of the
    # changes. This ensures that the CQ continues to work even if the Pre-CQ is
    # down.
    if not changes_to_test:
      changes_to_test = changes
    return changes_to_test, non_manifest_changes
  def SetPoolFromManifest(self, manifest):
    """Sets validation pool based on manifest path passed in."""
    # Note that GetNextManifest() calls GetLatestCandidate() in this case,
    # so the repo will already be sync'd appropriately. This means that
    # AcquirePoolFromManifest doesn't need to sync.
    self.pool = validation_pool.ValidationPool.AcquirePoolFromManifest(
        manifest, self._build_config['overlays'], self.repo,
        self._options.buildnumber, self.builder_name,
        self._build_config['master'], self._options.debug)
  def GetNextManifest(self):
    """Gets the next manifest using LKGM logic."""
    assert self.manifest_manager, \
        'Must run Initialize before we can get a manifest.'
    assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
        'Manifest manager instantiated with wrong class.'
    if self._build_config['master']:
      try:
        # In order to acquire a pool, we need an initialized buildroot.
        if not git.FindRepoDir(self.repo.directory):
          self.repo.Initialize()
        pool = validation_pool.ValidationPool.AcquirePool(
            self._build_config['overlays'], self.repo,
            self._options.buildnumber, self.builder_name,
            self._options.debug, check_tree_open=not self._options.debug,
            changes_query=self._options.cq_gerrit_override,
            change_filter=self.ChangeFilter)
        # We only have work to do if there are changes to try.
        try:
          # Try our best to submit these, but if submission fails, don't
          # let that stop us from continuing the build.
          pool.SubmitNonManifestChanges()
        except validation_pool.FailedToSubmitAllChangesException as e:
          cros_build_lib.Warning(str(e))
        self.pool = pool
      except validation_pool.TreeIsClosedException as e:
        cros_build_lib.Warning(str(e))
        return None
      manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool)
      if LKGMCandidateSyncStage.sub_manager:
        LKGMCandidateSyncStage.sub_manager.CreateFromManifest(manifest)
      return manifest
    else:
      manifest = self.manifest_manager.GetLatestCandidate()
      if manifest:
        self.SetPoolFromManifest(manifest)
        self.pool.ApplyPoolIntoRepo()
      return manifest
  def PerformStage(self):
    """Performs normal stage and prints blamelist at end."""
    if self._options.force_version:
      self.HandleSkip()
    else:
      ManifestVersionedSyncStage.PerformStage(self)

class ManifestVersionedSyncCompletionStage(ForgivingBuilderStage):
  """Stage that records board specific results for a unique manifest file."""
  option_name = 'sync'
  def __init__(self, options, build_config, sync_stage, success):
    super(ManifestVersionedSyncCompletionStage, self).__init__(
        options, build_config)
    self.sync_stage = sync_stage
    self.success = success
    # Message that can be set that will be sent along with the status in
    # UpdateStatus.
    self.message = None
  def PerformStage(self):
    if ManifestVersionedSyncStage.manifest_manager:
      ManifestVersionedSyncStage.manifest_manager.UpdateStatus(
          success=self.success, message=self.message)

class ImportantBuilderFailedException(Exception):
  """Exception thrown when an important build fails to build."""
  pass

class LKGMCandidateSyncCompletionStage(ManifestVersionedSyncCompletionStage):
  """Stage that records whether we passed or failed to build/test manifest."""
  def _GetSlavesStatus(self):
    if self._options.debug:
      # In debug mode, nothing is uploaded to Google Storage, so we bypass
      # the extra hop and just look at what we have locally.
      status = manifest_version.BuilderStatus.GetCompletedStatus(self.success)
      status_obj = manifest_version.BuilderStatus(status, self.message)
      return {self._bot_id: status_obj}
    elif not self._build_config['master']:
      # Slaves only need to look at their own status.
      return ManifestVersionedSyncStage.manifest_manager.GetBuildersStatus(
          [self._bot_id])
    else:
      builders = self._GetSlavesForMaster(self._build_config)
      manager = ManifestVersionedSyncStage.manifest_manager
      sub_manager = LKGMCandidateSyncStage.sub_manager
      if sub_manager:
        public_builders = [b['name'] for b in builders if not b['internal']]
        statuses = sub_manager.GetBuildersStatus(public_builders)
        private_builders = [b['name'] for b in builders if b['internal']]
        statuses.update(manager.GetBuildersStatus(private_builders))
      else:
        statuses = manager.GetBuildersStatus([b['name'] for b in builders])
      return statuses
  def _AbortCQHWTests(self):
    """Abort any HWTests started by the CQ."""
    manifest_manager = ManifestVersionedSyncStage.manifest_manager
    if (cbuildbot_config.IsCQType(self._build_config['build_type']) and
        manifest_manager is not None and
        self._target_manifest_branch == 'master'):
      release_tag = manifest_manager.current_version
      if release_tag and not commands.HaveHWTestsBeenAborted(release_tag):
        commands.AbortHWTests(release_tag, self._options.debug)
  def HandleSuccess(self):
    # We only promote for the pfq, not chrome pfq.
    # TODO(build): Run this logic in debug mode too.
    if (not self._options.debug and
        cbuildbot_config.IsPFQType(self._build_config['build_type']) and
        self._build_config['master'] and
        self._target_manifest_branch == 'master' and
        ManifestVersionedSyncStage.manifest_manager is not None and
        self._build_config['build_type'] != constants.CHROME_PFQ_TYPE):
      ManifestVersionedSyncStage.manifest_manager.PromoteCandidate()
      if LKGMCandidateSyncStage.sub_manager:
        LKGMCandidateSyncStage.sub_manager.PromoteCandidate()
  def HandleValidationFailure(self, failing_statuses):
    cros_build_lib.PrintBuildbotStepWarnings()
    cros_build_lib.Warning('\n'.join([
        'The following builders failed with this manifest:',
        ', '.join(sorted(failing_statuses.keys())),
        'Please check the logs of the failing builders for details.']))
  def HandleValidationTimeout(self, inflight_statuses):
    cros_build_lib.PrintBuildbotStepWarnings()
    cros_build_lib.Warning('\n'.join([
        'The following builders took too long to finish:',
        ', '.join(sorted(inflight_statuses.keys())),
        'Please check the logs of these builders for details.']))
  def PerformStage(self):
    if ManifestVersionedSyncStage.manifest_manager:
      ManifestVersionedSyncStage.manifest_manager.UploadStatus(
          success=self.success, message=self.message)
    if not self.success and self._build_config['important']:
      self._AbortCQHWTests()
    statuses = self._GetSlavesStatus()
    failing_build_dict, inflight_build_dict = {}, {}
    for builder, status in statuses.iteritems():
      if status.Failed():
        failing_build_dict[builder] = status
      elif status.Inflight():
        inflight_build_dict[builder] = status
    if failing_build_dict or inflight_build_dict:
      if failing_build_dict:
        self.HandleValidationFailure(failing_build_dict)
      if inflight_build_dict:
        self.HandleValidationTimeout(inflight_build_dict)
    if failing_build_dict or inflight_build_dict:
      raise results_lib.StepFailure()
    else:
      self.HandleSuccess()

class CommitQueueCompletionStage(LKGMCandidateSyncCompletionStage):
  """Commits or reports errors to CLs that failed to be validated."""
  def HandleSuccess(self):
    if self._build_config['master']:
      self.sync_stage.pool.SubmitPool()
      # After submitting the pool, update the commit hashes for uprevved
      # ebuilds.
      manifest = git.ManifestCheckout.Cached(self._build_root)
      portage_utilities.EBuild.UpdateCommitHashesForChanges(
          self.sync_stage.pool.changes, self._build_root, manifest)
      if cbuildbot_config.IsPFQType(self._build_config['build_type']):
        super(CommitQueueCompletionStage, self).HandleSuccess()
  def HandleValidationFailure(self, failing_statuses):
    """Sends the failure message of all failing builds in one go."""
    super(CommitQueueCompletionStage, self).HandleValidationFailure(
        failing_statuses)
    if self._build_config['master']:
      failing_messages = [x.message for x in failing_statuses.itervalues()]
      self.sync_stage.pool.HandleValidationFailure(failing_messages)
  def HandleValidationTimeout(self, inflight_builders):
    super(CommitQueueCompletionStage, self).HandleValidationTimeout(
        inflight_builders)
    self.sync_stage.pool.HandleValidationTimeout()
  def PerformStage(self):
    if not self.success and self._build_config['important']:
      # This message is sent along with the failed status to the master to
      # indicate a failure.
      self.message = self.sync_stage.pool.GetValidationFailedMessage()
    super(CommitQueueCompletionStage, self).PerformStage()
    if ManifestVersionedSyncStage.manifest_manager:
      ManifestVersionedSyncStage.manifest_manager.UpdateStatus(
          success=self.success, message=self.message)

class PreCQSyncStage(SyncStage):
  """Sync and apply patches to test if they compile."""
  def __init__(self, options, build_config, patches):
    super(PreCQSyncStage, self).__init__(options, build_config)
    # The list of patches to test.
    self.patches = patches
    # The ValidationPool of patches to test. Initialized in PerformStage, and
    # refreshed after bootstrapping by HandleSkip.
    self.pool = None
  def HandleSkip(self):
    """Handles skip and loads validation pool from disk."""
    super(PreCQSyncStage, self).HandleSkip()
    filename = self._options.validation_pool
    if filename:
      self.pool = validation_pool.ValidationPool.Load(filename)
  def PerformStage(self):
    super(PreCQSyncStage, self).PerformStage()
    self.pool = validation_pool.ValidationPool.AcquirePreCQPool(
        self._build_config['overlays'], self._build_root,
        self._options.buildnumber, self._build_config['name'],
        dryrun=self._options.debug_forced, changes=self.patches)
    self.pool.ApplyPoolIntoRepo()

class PreCQCompletionStage(bs.BuilderStage):
  """Reports the status of a trybot run to Google Storage and Gerrit."""
  def __init__(self, options, build_config, sync_stage, success):
    super(PreCQCompletionStage, self).__init__(options, build_config)
    self.sync_stage = sync_stage
    self.success = success
  def PerformStage(self):
    # Update Gerrit and Google Storage with the Pre-CQ status.
    if self.success:
      self.sync_stage.pool.HandlePreCQSuccess()
    else:
      message = self.sync_stage.pool.GetValidationFailedMessage()
      self.sync_stage.pool.HandleValidationFailure([message])

class PreCQLauncherStage(SyncStage):
  """Scans for CLs and automatically launches Pre-CQ jobs to test them."""
  STATUS_INFLIGHT = validation_pool.ValidationPool.STATUS_INFLIGHT
  STATUS_PASSED = validation_pool.ValidationPool.STATUS_PASSED
  STATUS_FAILED = validation_pool.ValidationPool.STATUS_FAILED
  STATUS_LAUNCHING = validation_pool.ValidationPool.STATUS_LAUNCHING
  STATUS_WAITING = validation_pool.ValidationPool.STATUS_WAITING
  # The number of minutes we allow before considering a launch attempt failed.
  # If this window isn't hit in a given launcher run, the window will start
  # again from scratch in the next run.
  LAUNCH_DELAY = 10
  def __init__(self, options, build_config):
    super(PreCQLauncherStage, self).__init__(options, build_config)
    self.skip_sync = True
    self.launching = {}
    self.retried = set()
  def _HasLaunchTimedOut(self, change):
    """Check whether a given |change| has timed out on its trybot launch.
    Assumes that the change is in the middle of being launched.
    Returns:
      True if the change has timed out. False otherwise.
    """
    diff = datetime.timedelta(minutes=self.LAUNCH_DELAY)
    return datetime.datetime.now() - self.launching[change] > diff
  def GetPreCQStatus(self, pool, changes):
    """Get the Pre-CQ status of a list of changes.
    Args:
      pool: The validation pool.
      changes: Changes to examine.
    Returns:
      busy: The set of CLs that are currently being tested.
      passed: The set of CLs that have been verified.
    """
    busy, passed = set(), set()
    for change in changes:
      status = pool.GetPreCQStatus(change)
      if status != self.STATUS_LAUNCHING:
        # The trybot has finished launching, so we should remove it from our
        # data structures.
        self.launching.pop(change, None)
      if status == self.STATUS_LAUNCHING:
        # The trybot is in the process of launching.
        busy.add(change)
        if change not in self.launching:
          self.launching[change] = datetime.datetime.now()
        elif self._HasLaunchTimedOut(change):
          if change in self.retried:
            msg = 'Failed twice to launch a Pre-CQ trybot for this change.'
            pool.SendNotification(change, '%(details)s', details=msg)
            pool.RemoveCommitReady(change)
            pool.UpdatePreCQStatus(change, self.STATUS_FAILED)
            self.retried.discard(change)
          else:
            # Try the change again.
            self.retried.add(change)
            pool.UpdatePreCQStatus(change, self.STATUS_WAITING)
      elif status == self.STATUS_INFLIGHT:
        # Once a Pre-CQ run actually starts, it'll set the status to
        # STATUS_INFLIGHT.
        busy.add(change)
      elif status == self.STATUS_FAILED:
        # The Pre-CQ run failed for this change. It's possible that we got
        # unlucky and this change was just marked as 'Not Ready' by a bot. To
        # test this, mark the CL as 'waiting' for now. If the CL is still marked
        # as 'Ready' next time we check, we'll know the CL is truly still ready.
        busy.add(change)
        pool.UpdatePreCQStatus(change, self.STATUS_WAITING)
      elif status == self.STATUS_PASSED:
        passed.add(change)
    return busy, passed
  def LaunchTrybot(self, pool, plan):
    """Launch a Pre-CQ run with the provided list of CLs.
    Args:
      plan: The list of patches to test in the Pre-CQ run.
    """
    cmd = ['cbuildbot', '--remote', '--nobootstrap',
           constants.PRE_CQ_BUILDER_NAME]
    if self._options.debug_forced:
      cmd.append('--debug')
    for patch in plan:
      number = cros_patch.FormatGerritNumber(
          patch.gerrit_number, force_internal=patch.internal)
      cmd += ['-g', number]
    cros_build_lib.RunCommand(cmd, cwd=self._build_root)
    for patch in plan:
      if pool.GetPreCQStatus(patch) != self.STATUS_PASSED:
        pool.UpdatePreCQStatus(patch, self.STATUS_LAUNCHING)
  def GetDisjointTransactionsToTest(self, pool, changes):
    """Get the list of disjoint transactions to test.
    Returns:
      A list of disjoint transactions to test. Each transaction should be sent
      to a different Pre-CQ trybot.
    """
    busy, passed = self.GetPreCQStatus(pool, changes)
    # Create a list of disjoint transactions to test.
    manifest = git.ManifestCheckout.Cached(self._build_root)
    plans = pool.CreateDisjointTransactions(manifest)
    for plan in plans:
      # If any of the CLs in the plan are currently "busy" being tested,
      # wait until they're done before launching our trybot run. This helps
      # avoid race conditions.
      #
      # Similarly, if all of the CLs in the plan have already been validated,
      # there's no need to launch a trybot run.
      plan = set(plan)
      if plan.issubset(passed):
        logging.info('CLs already verified: %r', ' '.join(map(str, plan)))
      elif plan.intersection(busy):
        logging.info('CLs currently being verified: %r',
                     ' '.join(map(str, plan.intersection(busy))))
        if plan.difference(busy):
          logging.info('CLs waiting on verification of dependencies: %r',
                       ' '.join(map(str, plan.difference(busy))))
      else:
        yield plan
  def ProcessChanges(self, pool, changes, _non_manifest_changes):
    """Process a list of changes that were marked as Ready.
    From our list of changes that were marked as Ready, we create a
    list of disjoint transactions and send each one to a separate Pre-CQ
    trybot.
    Non-manifest changes are just submitted here because they don't need to be
    verified by either the Pre-CQ or CQ.
    """
    # Submit non-manifest changes if we can.
    if cros_build_lib.TreeOpen(
        validation_pool.ValidationPool.STATUS_URL, 0, max_timeout=0):
      try:
        pool.SubmitNonManifestChanges(check_tree_open=False)
      except validation_pool.FailedToSubmitAllChangesException as e:
        cros_build_lib.Warning(str(e))
    # Launch trybots for manifest changes.
    for plan in self.GetDisjointTransactionsToTest(pool, changes):
      self.LaunchTrybot(pool, plan)
    # Tell ValidationPool to keep waiting for more changes until we hit
    # its internal timeout.
    return [], []
  def PerformStage(self):
    # Setup and initialize the repo.
    super(PreCQLauncherStage, self).PerformStage()
    # Loop through all of the changes until we hit a timeout.
    validation_pool.ValidationPool.AcquirePool(
        self._build_config['overlays'], self.repo,
        self._options.buildnumber, constants.PRE_CQ_BUILDER_NAME,
        dryrun=self._options.debug_forced,
        changes_query=self._options.cq_gerrit_override,
        check_tree_open=False, change_filter=self.ProcessChanges)

class RefreshPackageStatusStage(bs.BuilderStage):
  """Stage for refreshing Portage package status in online spreadsheet."""
  def PerformStage(self):
    commands.RefreshPackageStatus(buildroot=self._build_root,
                                  boards=self._boards,
                                  debug=self._options.debug)

class InitSDKStage(bs.BuilderStage):
  """Stage that is responsible for initializing the SDK."""
  option_name = 'build'
  def __init__(self, options, build_config):
    super(InitSDKStage, self).__init__(options, build_config)
    self._env = {}
    if self._options.clobber:
      self._env['IGNORE_PREFLIGHT_BINHOST'] = '1'
    self._latest_toolchain = (self._build_config['latest_toolchain'] or
                              self._options.latest_toolchain)
    if self._latest_toolchain and self._build_config['gcc_githash']:
      self._env['USE'] = 'git_gcc'
      self._env['GCC_GITHASH'] = self._build_config['gcc_githash']
  def PerformStage(self):
    chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
    replace = self._build_config['chroot_replace']
    if os.path.isdir(self._build_root) and not replace:
      try:
        commands.RunChrootUpgradeHooks(self._build_root)
      except results_lib.BuildScriptFailure:
        cros_build_lib.PrintBuildbotStepText('Replacing broken chroot')
        cros_build_lib.PrintBuildbotStepWarnings()
        replace = True
    if not os.path.isdir(chroot_path) or replace:
      use_sdk = (self._build_config['use_sdk'] and not self._options.nosdk)
      commands.MakeChroot(
          buildroot=self._build_root,
          replace=replace,
          use_sdk=use_sdk,
          chrome_root=self._options.chrome_root,
          extra_env=self._env)

class SetupBoardStage(InitSDKStage):
  """Stage that is responsible for building host pkgs and setting up a board."""
  option_name = 'build'
  def __init__(self, options, build_config, boards=None):
    super(SetupBoardStage, self).__init__(options, build_config)
    if boards is not None:
      self._boards = boards
  def PerformStage(self):
    # Calculate whether we should use binary packages.
    usepkg = (self._build_config['usepkg_setup_board'] and
              not self._latest_toolchain)
    # We need to run chroot updates on most builders because they uprev after
    # the InitSDK stage. For the SDK builder, we can skip updates because uprev
    # is run prior to InitSDK. This is not just an optimization: It helps
    # workaround http://crbug.com/225509
    chroot_upgrade = (
        self._build_config['build_type'] != constants.CHROOT_BUILDER_TYPE)
    # Iterate through boards to setup.
    chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
    for board_to_build in self._boards:
      # Only update the board if we need to do so.
      board_path = os.path.join(chroot_path, 'build', board_to_build)
      if os.path.isdir(board_path) and not chroot_upgrade:
        continue
      commands.SetupBoard(self._build_root,
                          board=board_to_build,
                          usepkg=usepkg,
                          extra_env=self._env,
                          profile=self._options.profile or
                              self._build_config['profile'],
                          chroot_upgrade=chroot_upgrade)
      chroot_upgrade = False
    commands.SetSharedUserPassword(
        self._build_root,
        password=self._build_config['shared_user_password'])

class UprevStage(bs.BuilderStage):
  """Stage that uprevs Chromium OS packages that the builder intends to
  validate.
  """
  option_name = 'uprev'
  def __init__(self, options, build_config, boards=None, enter_chroot=True):
    super(UprevStage, self).__init__(options, build_config)
    self._enter_chroot = enter_chroot
    if boards is not None:
      self._boards = boards
  def PerformStage(self):
    # Perform other uprevs.
    if self._build_config['uprev']:
      overlays, _ = self._ExtractOverlays()
      commands.UprevPackages(self._build_root,
                             self._boards,
                             overlays,
                             enter_chroot=self._enter_chroot)

class SyncChromeStage(bs.BuilderStage):
  """Stage that syncs Chrome sources if needed."""
  option_name = 'managed_chrome'
  def __init__(self, options, build_config):
    super(SyncChromeStage, self).__init__(options, build_config)
    # PerformStage() will fill this out for us.
    self.chrome_version = None
  def PerformStage(self):
    # Perform chrome uprev.
    chrome_atom_to_build = None
    if self._chrome_rev:
      chrome_atom_to_build = commands.MarkChromeAsStable(
          self._build_root, self._target_manifest_branch,
          self._chrome_rev, self._boards,
          chrome_version=self._options.chrome_version)
    kwargs = {}
    if self._chrome_rev == constants.CHROME_REV_SPEC:
      kwargs['revision'] = self._options.chrome_version
      cpv = None
      cros_build_lib.PrintBuildbotStepText('revision %s' % kwargs['revision'])
      self.chrome_version = self._options.chrome_version
    else:
      cpv = portage_utilities.BestVisible(constants.CHROME_CP,
                                          buildroot=self._build_root)
      kwargs['tag'] = cpv.version_no_rev.partition('_')[0]
      cros_build_lib.PrintBuildbotStepText('tag %s' % kwargs['tag'])
      self.chrome_version = kwargs['tag']
    useflags = self._build_config['useflags'] or []
    commands.SyncChrome(self._build_root, self._options.chrome_root, useflags,
                        **kwargs)
    if (self._chrome_rev and not chrome_atom_to_build and
        self._options.buildbot and
        self._build_config['build_type'] == constants.CHROME_PFQ_TYPE):
      cros_build_lib.Info('Chrome already uprevved')
      sys.exit(0)

class PatchChromeStage(bs.BuilderStage):
  """Stage that applies Chrome patches if needed."""
  option_name = 'rietveld_patches'
  def PerformStage(self):
    for patch in ' '.join(self._options.rietveld_patches).split():
      patch, colon, subdir = patch.partition(':')
      if not colon:
        subdir = 'src'
      commands.PatchChrome(self._options.chrome_root, patch, subdir)

class BuildPackagesStage(ArchivingStage):
  """Build Chromium OS packages."""
  option_name = 'build'
  def __init__(self, options, build_config, board, archive_stage,
               pgo_generate=False, pgo_use=False):
    useflags = build_config['useflags'][:] if build_config['useflags'] else []
    self._pgo_generate, self._pgo_use = pgo_generate, pgo_use
    suffix = None
    assert not pgo_generate or not pgo_use
    if pgo_generate:
      suffix = ' [%s]' % constants.USE_PGO_GENERATE
      useflags.append(constants.USE_PGO_GENERATE)
    elif pgo_use:
      suffix = ' [%s]' % constants.USE_PGO_USE
      useflags.append(constants.USE_PGO_USE)
    super(BuildPackagesStage, self).__init__(options, build_config, board,
                                             archive_stage, suffix=suffix)
    self._env = {}
    if useflags:
      self._env['USE'] = ' '.join(useflags)
    if self._options.chrome_root:
      self._env['CHROME_ORIGIN'] = 'LOCAL_SOURCE'
    if self._options.clobber:
      self._env['IGNORE_PREFLIGHT_BINHOST'] = '1'
    self._build_autotest = (self._build_config['build_tests'] and
                            self._options.tests)
  def _GetArchitectures(self):
    """Get the list of architectures built by this builder."""
    return set(self._GetPortageEnvVar('ARCH', b) for b in self._boards)
  def PerformStage(self):
    # Wait for PGO data to be ready if needed.
    if self._pgo_use:
      cpv = portage_utilities.BestVisible(constants.CHROME_CP,
                                          buildroot=self._build_root)
      commands.WaitForPGOData(self._GetArchitectures(), cpv)
    commands.Build(self._build_root,
                   self._current_board,
                   build_autotest=self._build_autotest,
                   usepkg=self._build_config['usepkg_build_packages'],
                   nowithdebug=self._build_config['nowithdebug'],
                   packages=self._build_config['packages'],
                   skip_chroot_upgrade=True,
                   chrome_root=self._options.chrome_root,
                   extra_env=self._env)

class BuildImageStage(BuildPackagesStage):
  """Build standard Chromium OS images."""
  option_name = 'build'
  def _BuildImages(self):
    # We only build base, dev, and test images from this stage.
    if self._pgo_generate:
      images_can_build = set(['test'])
    else:
      images_can_build = set(['base', 'dev', 'test'])
    images_to_build = set(self._build_config['images']).intersection(
        images_can_build)
    version = self.archive_stage.release_tag
    disk_layout = self._build_config['disk_layout']
    if self._pgo_generate:
      disk_layout = constants.PGO_GENERATE_DISK_LAYOUT
      if version:
        version = '%s-pgo-generate' % version
    rootfs_verification = self._build_config['rootfs_verification']
    commands.BuildImage(self._build_root,
                        self._current_board,
                        sorted(images_to_build),
                        rootfs_verification=rootfs_verification,
                        version=version,
                        disk_layout=disk_layout,
                        extra_env=self._env)
    # Update link to latest image.
    latest_image = os.readlink(self.GetImageDirSymlink('latest'))
    cbuildbot_image_link = self.GetImageDirSymlink()
    if os.path.lexists(cbuildbot_image_link):
      os.remove(cbuildbot_image_link)
    os.symlink(latest_image, cbuildbot_image_link)
    parallel.RunParallelSteps(
        [self._BuildVMImage, self.ArchivePayloads,
         lambda: self._GenerateAuZip(cbuildbot_image_link)])
  def _BuildVMImage(self):
    if self._build_config['vm_tests'] and not self._pgo_generate:
      commands.BuildVMImageForTesting(
          self._build_root,
          self._current_board,
          disk_layout=self._build_config['disk_vm_layout'],
          extra_env=self._env)
  def ArchivePayloads(self):
    """Archives update payloads when they are ready."""
    with osutils.TempDir(prefix='cbuildbot-payloads') as tempdir:
      with self.ArtifactUploader() as queue:
        if self._build_config['upload_hw_test_artifacts']:
          image_path = os.path.join(self.GetImageDirSymlink(),
                                    'chromiumos_test_image.bin')
          # For non release builds, we are only interested in generating
          # payloads for the purpose of imaging machines. This means we
          # shouldn't generate delta payloads for n-1->n testing.
          # TODO: Add a config flag for generating delta payloads instead.
          if (self._build_config['build_type'] == constants.CANARY_TYPE and
              not self._pgo_generate):
            commands.GenerateNPlus1Payloads(
                self._build_root, self.bot_archive_root, image_path, tempdir)
          else:
            commands.GenerateFullPayload(self._build_root, image_path, tempdir)
          for payload in os.listdir(tempdir):
            queue.put([os.path.join(tempdir, payload)])
  def _GenerateAuZip(self, image_dir):
    """Create au-generator.zip."""
    if not self._pgo_generate:
      commands.GenerateAuZip(self._build_root,
                             image_dir,
                             extra_env=self._env)
  def _BuildAutotestTarballs(self):
    with osutils.TempDir(prefix='cbuildbot-autotest') as tempdir:
      with self.ArtifactUploader() as queue:
        cwd = os.path.join(self._build_root, 'chroot', 'build',
                           self._current_board, 'usr', 'local')
        # Find the control files in autotest/
        control_files = commands.FindFilesWithPattern(
            'control*', target='autotest', cwd=cwd)
        # Tar the control files and the packages.
        autotest_tarball = os.path.join(tempdir, 'autotest.tar')
        input_list = control_files + ['autotest/packages']
        commands.BuildTarball(self._build_root, input_list, autotest_tarball,
                              cwd=cwd, compressed=False)
        queue.put([autotest_tarball])
        # Tar up the test suites.
        test_suites_tarball = os.path.join(tempdir, 'test_suites.tar.bz2')
        commands.BuildTarball(self._build_root, ['autotest/test_suites'],
                              test_suites_tarball, cwd=cwd)
        queue.put([test_suites_tarball])
  def PerformStage(self):
    # Build images and autotest tarball in parallel.
    steps = []
    if (self._build_config['upload_hw_test_artifacts'] or
        self._build_config['archive_build_debug']) and self._build_autotest:
      steps.append(self._BuildAutotestTarballs)
    if self._build_config['images']:
      steps.append(self._BuildImages)
    parallel.RunParallelSteps(steps)

class SignerTestStage(ArchivingStage):
  """Run signer related tests."""
  option_name = 'tests'
  config_name = 'signer_tests'
  # If the signer tests take longer than 30 minutes, abort. They usually take
  # five minutes to run.
  SIGNER_TEST_TIMEOUT = 1800

  def PerformStage(self):
    if not self.archive_stage.WaitForRecoveryImage():
      raise InvalidTestConditionException('Missing recovery image.')
    with cros_build_lib.SubCommandTimeout(self.SIGNER_TEST_TIMEOUT):
      commands.RunSignerTests(self._build_root, self._current_board)


class UnitTestStage(BoardSpecificBuilderStage):
  """Run unit tests."""

  option_name = 'tests'
  config_name = 'unittests'

  # If the unit tests take longer than 70 minutes, abort. They usually take
  # ten minutes to run.
  #
  # If the processes hang, parallel_emerge will print a status report after 60
  # minutes, so we picked 70 minutes because it gives us a little buffer time.
  UNIT_TEST_TIMEOUT = 70 * 60

  def PerformStage(self):
    with cros_build_lib.SubCommandTimeout(self.UNIT_TEST_TIMEOUT):
      commands.RunUnitTests(self._build_root,
                            self._current_board,
                            full=(not self._build_config['quick_unit']),
                            nowithdebug=self._build_config['nowithdebug'],
                            blacklist=self._build_config['unittest_blacklist'])


class VMTestStage(ArchivingStage):
  """Run autotests in a virtual machine."""

  option_name = 'tests'
  config_name = 'vm_tests'

  def _ArchiveTestResults(self, test_results_dir):
    """Archives test results to Google Storage."""
    test_tarball = commands.ArchiveTestResults(
        self._build_root, test_results_dir, prefix='')

    # Wait for breakpad symbols.
    got_symbols = self.archive_stage.WaitForBreakpadSymbols()
    filenames = commands.GenerateStackTraces(
        self._build_root, self._current_board, test_tarball, self.archive_path,
        got_symbols)
    filenames.append(commands.ArchiveFile(test_tarball, self.archive_path))

    cros_build_lib.Info('Uploading artifacts to Google Storage...')
    with self.ArtifactUploader(archive=False, strict=False) as queue:
      for filename in filenames:
        queue.put([filename])
        prefix = 'crash: ' if filename.endswith('.dmp.txt') else ''
        self.PrintDownloadLink(filename, prefix)

  def PerformStage(self):
    # These directories are used later to archive test artifacts.
    test_results_dir = commands.CreateTestRoot(self._build_root)

    try:
      test_type = self._build_config['vm_tests']
      commands.RunTestSuite(self._build_root,
                            self._current_board,
                            self.GetImageDirSymlink(),
                            os.path.join(test_results_dir,
                                         'test_harness'),
                            test_type=test_type,
                            whitelist_chrome_crashes=self._chrome_rev is None,
                            archive_dir=self.bot_archive_root)

      if self._build_config['build_type'] == constants.CANARY_TYPE:
        commands.RunDevModeTest(
            self._build_root, self._current_board, self.GetImageDirSymlink())
    except Exception:
      cros_build_lib.Error(_VM_TEST_ERROR_MSG)
      raise
    finally:
      self._ArchiveTestResults(test_results_dir)


class TestTimeoutException(Exception):
  """Raised when a critical test times out."""
  pass


class InvalidTestConditionException(Exception):
  """Raised when pre-conditions for a test aren't met."""
  pass


class HWTestStage(ArchivingStage):
  """Stage that runs tests in the Autotest lab."""

  option_name = 'tests'
  config_name = 'hw_tests'

  PERF_RESULTS_EXTENSION = 'results'

  def __init__(self, options, build_config, board, archive_stage, suite_config):
    super(HWTestStage, self).__init__(options, build_config, board,
                                      archive_stage,
                                      suffix=' [%s]' % suite_config.suite)
    self.suite_config = suite_config
    self.wait_for_results = True
  def _PrintFile(self, filename):
    with open(filename) as f:
      print f.read()

  def _SendPerfResults(self):
    """Sends the perf results from the test to the perf dashboard."""
    result_file_name = '%s.%s' % (self.suite_config.suite,
                                  HWTestStage.PERF_RESULTS_EXTENSION)
    gs_results_file = '/'.join([self.upload_url, result_file_name])
    gs_context = gs.GSContext()
    gs_context.Copy(gs_results_file, self._options.log_dir)
    # Prints out the actual result from gs_context.Copy.
    logging.info('Copy of %s completed. Printing below:', result_file_name)
    self._PrintFile(os.path.join(self._options.log_dir, result_file_name))

  def _CheckAborted(self):
    aborted = (self.archive_stage.release_tag and
               commands.HaveHWTestsBeenAborted(self.archive_stage.release_tag))
    if aborted:
      cros_build_lib.PrintBuildbotStepText('aborted')
      cros_build_lib.Warning('HWTests aborted')
    return aborted

  # Disable complaint about calling _HandleStageException.
  # pylint: disable=W0212
  def _HandleStageException(self, exception):
    """Override and don't set status to FAIL but FORGIVEN instead."""
    # 2 for warnings returned by run_suite.py, or CLIENT_HTTP_CODE error
    # returned by autotest_rpc_client.py. It is the former that we care about.
    # 11, 12, 13 for cases when rpc is down, see autotest_rpc_errors.py.
    codes_handled_as_warning = (2, 11, 12, 13)

    if self.suite_config.critical:
      return super(HWTestStage, self)._HandleStageException(exception)
    is_lab_down = (isinstance(exception, lab_status.LabIsDownException) or
                   isinstance(exception, lab_status.BoardIsDisabledException))
    is_warning_code = (isinstance(exception, cros_build_lib.RunCommandError) and
                       exception.result.returncode in codes_handled_as_warning)
    if is_lab_down or is_warning_code or self._CheckAborted():
      return self._HandleExceptionAsWarning(exception)
    else:
      return super(HWTestStage, self)._HandleStageException(exception)

  def DealWithTimeout(self, exception):
    if not self.suite_config.critical and not self.suite_config.fatal_timeouts:
      return self._HandleExceptionAsWarning(exception)
    return super(HWTestStage, self)._HandleStageException(exception)

  def PerformStage(self):
    if self._CheckAborted():
      cros_build_lib.Info('Skipping HWTests as they have been aborted.')
      return

    build = '/'.join([self._bot_id, self.version])
    if self._options.remote_trybot and self._options.hwtest:
      debug = self._options.debug_forced
    else:
      debug = self._options.debug
    try:
      lab_status.CheckLabStatus(self._current_board)
      with cros_build_lib.SubCommandTimeout(self.suite_config.timeout):
        commands.RunHWTestSuite(build,
                                self.suite_config.suite,
                                self._current_board,
                                self.suite_config.pool,
                                self.suite_config.num,
                                self.suite_config.file_bugs,
                                self.wait_for_results,
                                debug)
        if self.suite_config.copy_perf_results:
          self._SendPerfResults()
    except cros_build_lib.TimeoutError as exception:
      return self.DealWithTimeout(exception)


class AUTestStage(HWTestStage):
  """Stage for au hw test suites that requires special pre-processing."""

  def PerformStage(self):
    """Wait for payloads to be staged and uploads its au control files."""
    with osutils.TempDir() as tempdir:
      tarball = commands.BuildAUTestTarball(
          self._build_root, self._current_board, tempdir,
          self.version, self.upload_url)
      self.UploadArtifact(tarball)
    super(AUTestStage, self).PerformStage()
class ASyncHWTestStage(HWTestStage, ForgivingBuilderStage):
  """Stage that fires and forgets hw test suites to the Autotest lab."""

  def __init__(self, *args, **dargs):
    super(ASyncHWTestStage, self).__init__(*args, **dargs)
    self.wait_for_results = False


class SDKPackageStage(bs.BuilderStage):
  """Stage that performs preparing and packaging SDK files"""

  # Version of the Manifest file being generated. Should be incremented for
  # Major format changes.
  MANIFEST_VERSION = '1'
  _EXCLUDED_PATHS = ('usr/lib/debug', 'usr/local/autotest', 'packages', 'tmp')

  def PerformStage(self):
    tarball_name = 'built-sdk.tar.xz'
    tarball_location = os.path.join(self._build_root, tarball_name)
    chroot_location = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
    board_location = os.path.join(chroot_location, 'build/amd64-host')
    manifest_location = os.path.join(self._build_root,
                                     '%s.Manifest' % tarball_name)

    # Create a tarball of the latest SDK.
    self.CreateSDKTarball(chroot_location, board_location, tarball_location)

    # Create a package manifest for the tarball.
    self.CreateManifestFromSDK(board_location, manifest_location)

    # Create toolchain packages.
    self.CreateRedistributableToolchains(chroot_location)

    # Make sure the regular user has the permission to read.
    cmd = ['chmod', 'a+r', tarball_location]
    cros_build_lib.SudoRunCommand(cmd, cwd=board_location)

  def CreateRedistributableToolchains(self, chroot_location):
    osutils.RmDir(os.path.join(chroot_location,
                               constants.SDK_TOOLCHAINS_OUTPUT),
                  ignore_missing=True)
    cros_build_lib.RunCommand(
        ['cros_setup_toolchains', '--create-packages',
         '--output-dir', os.path.join('/', constants.SDK_TOOLCHAINS_OUTPUT)],
        enter_chroot=True)

  def CreateSDKTarball(self, _chroot, sdk_path, dest_tarball):
    """Creates an SDK tarball from a given source chroot.

    Args:
      chroot: A chroot used for finding compression tool.
      sdk_path: Path to the root of newly generated SDK image.
      dest_tarball: Path of the tarball that should be created.
    """
    # TODO(zbehan): We cannot use xz from the chroot unless it's
    # statically linked.
    extra_args = ['--exclude=%s/*' % path for path in self._EXCLUDED_PATHS]
    # Options for maximum compression.
    extra_env = {'XZ_OPT': '-e9'}
    cros_build_lib.CreateTarball(
        dest_tarball, sdk_path, sudo=True, extra_args=extra_args,
        extra_env=extra_env)

  def CreateManifestFromSDK(self, sdk_path, dest_manifest):
    """Creates a manifest from a given source chroot.

    Args:
      sdk_path: Path to the root of the SDK to describe.
      dest_manifest: Path to the manifest that should be generated.
    """
    package_data = {}
    for key, version in portage_utilities.ListInstalledPackages(sdk_path):
      package_data.setdefault(key, []).append((version, {}))
    self._WriteManifest(package_data, dest_manifest)

  def _WriteManifest(self, data, manifest):
    """Encode manifest into a json file."""
    json_input = dict(version=self.MANIFEST_VERSION, packages=data)
    osutils.WriteFile(manifest, json.dumps(json_input))


class SDKTestStage(bs.BuilderStage):
  """Stage that performs testing an SDK created in a previous stage"""

  option_name = 'tests'

  def PerformStage(self):
    tarball_location = os.path.join(self._build_root, 'built-sdk.tar.xz')
    new_chroot_cmd = ['cros_sdk', '--chroot', 'new-sdk-chroot']
    # Build a new SDK using the provided tarball.
    cmd = new_chroot_cmd + ['--download', '--replace', '--nousepkg',
                            '--url', 'file://' + tarball_location]
    cros_build_lib.RunCommand(cmd, cwd=self._build_root)

    for board in self._boards:
      cros_build_lib.PrintBuildbotStepText(board)
      cmd = new_chroot_cmd + ['--', './setup_board',
                              '--board', board, '--skip_chroot_upgrade']
      cros_build_lib.RunCommand(cmd, cwd=self._build_root)
      cmd = new_chroot_cmd + ['--', './build_packages',
                              '--board', board, '--nousepkg',
                              '--skip_chroot_upgrade']
      cros_build_lib.RunCommand(cmd, cwd=self._build_root)


class NothingToArchiveException(Exception):
  """Thrown if ArchiveStage found nothing to archive."""

  def __init__(self, message='No images found to archive.'):
    super(NothingToArchiveException, self).__init__(message)


class ArchiveStage(ArchivingStage):
  """Archives build and test artifacts for developer consumption.

  Attributes:
    release_tag: The release tag. E.g. 2981.0.0
    version: The full version string, including the milestone.
        E.g. R26-2981.0.0-b123
  """

  option_name = 'archive'

  # This stage is intended to run in the background, in parallel with tests.
  def __init__(self, options, build_config, board, release_tag,
               chrome_version=None):
    self.release_tag = release_tag
    self._chrome_version = chrome_version
    super(ArchiveStage, self).__init__(options, build_config, board, self)
    self._breakpad_symbols_queue = multiprocessing.Queue()
    self._recovery_image_status_queue = multiprocessing.Queue()
    self._release_upload_queue = multiprocessing.Queue()
    self._upload_queue = multiprocessing.Queue()
    self._upload_symbols_queue = multiprocessing.Queue()
    self._pkg_dir = os.path.join(
        self._build_root, constants.DEFAULT_CHROOT_DIR,
        'build', self._current_board, 'var', 'db', 'pkg')

    # Setup the archive path. This is used by other stages.
    self._SetupArchivePath()

  @cros_build_lib.MemoizedSingleCall
  def GetVersionInfo(self):
    """Helper for picking apart various version bits"""
    return manifest_version.VersionInfo.from_repo(self._build_root)

  @cros_build_lib.MemoizedSingleCall
  def GetVersion(self):
    """Helper for calculating self.version."""
    verinfo = self.GetVersionInfo()
    calc_version = self.release_tag or verinfo.VersionString()
    calc_version = 'R%s-%s' % (verinfo.chrome_branch, calc_version)

    # Non-versioned builds need the build number to uniquify the image.
    if not self.release_tag:
      calc_version += '-b%s' % self._options.buildnumber
    return calc_version

  def WaitForRecoveryImage(self):
    """Wait until artifacts needed by SignerTest stage are created.

    Returns:
      True if artifacts created successfully.
      False otherwise.
    """
    cros_build_lib.Info('Waiting for recovery image...')
    status = self._recovery_image_status_queue.get()
    # Put the status back so other SignerTestStage instances don't starve.
    self._recovery_image_status_queue.put(status)
    return status

  def _BreakpadSymbolsGenerated(self, success):
    """Signal that breakpad symbols have been generated.

    Arguments:
      success: True to indicate the symbols were generated, else False.
    """
    self._breakpad_symbols_queue.put(success)

  def WaitForBreakpadSymbols(self):
    """Wait for the breakpad symbols to be generated.

    Returns:
      True if the breakpad symbols were generated.
      False if the breakpad symbols were not generated within 20 mins.
    """
    success = False
    cros_build_lib.Info('Waiting for breakpad symbols...')
    try:
      # TODO: Clean this up so that we no longer rely on a timeout
      success = self._breakpad_symbols_queue.get(True, 1200)
    except Queue.Empty:
      cros_build_lib.Warning(
          'Breakpad symbols were not generated within timeout period.')
    return success

  def _SetupArchivePath(self):
    """Create a fresh directory for archiving a build."""
    if self._options.buildbot:
      # Buildbot: Clear out any leftover build artifacts, if present.
      shutil.rmtree(self.archive_path, ignore_errors=True)
    else:
      # Clear the list of uploaded files if it exists.
      osutils.SafeUnlink(os.path.join(self.archive_path,
                                      commands.UPLOADED_LIST_FILENAME))
    os.makedirs(self.archive_path)

  def RefreshMetadata(self, stage, final_status=None):
    """Create a JSON of various metadata describing this build."""
    config = self._build_config
    acl = None if self._build_config['internal'] else 'public-read'

    start_time = results_lib.Results.start_time
    current_time = datetime.datetime.now()
    start_time_stamp = cros_build_lib.UserDateTimeFormat(timeval=start_time)
    current_time_stamp = cros_build_lib.UserDateTimeFormat(timeval=current_time)
    duration = '%s' % (current_time - start_time,)

    sdk_verinfo = cros_build_lib.LoadKeyValueFile(
        os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
        ignore_missing=True)
    verinfo = self.GetVersionInfo()
    metadata = {
        # Version of the metadata format.
        'metadata-version': '2',
        # Data for this build.
        'bot-config': config['name'],
        'bot-hostname': cros_build_lib.GetHostName(fully_qualified=True),
        'boards': config['boards'],
        'build-number': self._options.buildnumber,
        'builder-name': os.environ.get('BUILDBOT_BUILDERNAME'),
        'status': {
            'current-time': current_time_stamp,
            'status': final_status if final_status else 'running',
            'summary': stage,
        },
        'time': {
            'start': start_time_stamp,
            'finish': current_time_stamp if final_status else '',
            'duration': duration,
        },
        'version': {
            'chrome': self._chrome_version,
            'full': self.version,
            'milestone': verinfo.chrome_branch,
            'platform': self.release_tag or verinfo.VersionString(),
        },
        # Data for the toolchain used.
        'sdk-version': sdk_verinfo.get('SDK_LATEST_VERSION', '<unknown>'),
        'toolchain-url': sdk_verinfo.get('TC_PATH', '<unknown>'),
    }
    if len(config['boards']) == 1:
      toolchains = toolchain.GetToolchainsForBoard(config['boards'][0])
      metadata['toolchain-tuple'] = (
          toolchain.FilterToolchains(toolchains, 'default', True).keys() +
          toolchain.FilterToolchains(toolchains, 'default', False).keys())

    metadata['results'] = []
    for name, result, description, run_time in results_lib.Results.Get():
      timestr = datetime.timedelta(seconds=math.ceil(run_time))
      if result in results_lib.Results.NON_FAILURE_TYPES:
        status = 'passed'
      else:
        status = 'failed'
      metadata['results'].append({
          'name': name,
          'status': status,
          # The result might be a custom exception.
          'summary': str(result),
          'duration': '%s' % timestr,
          'description': description,
          'log': self.ConstructDashboardURL(stage=name),
      })

    metadata_json = os.path.join(self.archive_path, constants.METADATA_JSON)
    # Stages may run in parallel, so we have to do atomic updates on this.
    osutils.WriteFile(metadata_json, json.dumps(metadata), atomic=True)
    commands.UploadArchivedFile(self.archive_path, self.upload_url,
                                os.path.basename(metadata_json),
                                debug=self.debug, acl=acl,
                                update_list=bool(final_status))

  @staticmethod
  def _SingleMatchGlob(path_pattern):
    """Returns the last match (after sort) if multiple found."""
    files = glob.glob(path_pattern)
    files.sort()
    if not files:
      raise NothingToArchiveException('No %s found!' % path_pattern)
    elif len(files) > 1:
      cros_build_lib.PrintBuildbotStepWarnings()
      cros_build_lib.Warning('Expecting one result for %s package, but '
                             'found multiple.', path_pattern)
    return files[-1]

  def ArchiveStrippedChrome(self):
    """Generate and upload stripped Chrome package."""
    cmd = ['strip_package', '--board', self._current_board,
           constants.CHROME_PN]
    cros_build_lib.RunCommand(cmd, cwd=self._build_root, enter_chroot=True)
    pkg_dir = os.path.join(
        self._build_root, constants.DEFAULT_CHROOT_DIR, 'build',
        self._current_board, 'stripped-packages')
    chrome_tarball = self._SingleMatchGlob(
        os.path.join(pkg_dir, constants.CHROME_CP) + '-*')
    filename = os.path.basename(chrome_tarball)
    os.link(chrome_tarball, os.path.join(self.archive_path, filename))
    self._upload_queue.put([filename])

  def BuildAndArchiveChromeSysroot(self):
    """Generate and upload sysroot for building Chrome."""
    assert self.archive_path.startswith(self._build_root)
    in_chroot_path = git.ReinterpretPathForChroot(self.archive_path)
    cmd = ['cros_generate_sysroot', '--out-dir', in_chroot_path, '--board',
           self._current_board, '--package', constants.CHROME_CP]
    cros_build_lib.RunCommand(cmd, cwd=self._build_root, enter_chroot=True)
    self._upload_queue.put([constants.CHROME_SYSROOT_TAR])

  def ArchiveChromeEbuildEnv(self):
    """Generate and upload Chrome ebuild environment."""
    chrome_dir = self._SingleMatchGlob(
        os.path.join(self._pkg_dir, constants.CHROME_CP) + '-*')
    env_bzip = os.path.join(chrome_dir, 'environment.bz2')
    with osutils.TempDir() as tempdir:
      # Convert from bzip2 to tar format.
      bzip2 = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
      cros_build_lib.RunCommand(
          [bzip2, '-d', env_bzip, '-c'],
          log_stdout_to_file=os.path.join(tempdir, constants.CHROME_ENV_FILE))
      env_tar = os.path.join(self.archive_path, constants.CHROME_ENV_TAR)
      cros_build_lib.CreateTarball(env_tar, tempdir)
      self._upload_queue.put([os.path.basename(env_tar)])

  def PerformStage(self):
    buildroot = self._build_root
    config = self._build_config
    board = self._current_board
    debug = self.debug
    upload_url = self.upload_url
    archive_path = self.archive_path
    image_dir = self.GetImageDirSymlink()

    extra_env = {}
    if config['useflags']:
      extra_env['USE'] = ' '.join(config['useflags'])

    if not archive_path:
      raise NothingToArchiveException()

    # The following functions are run in parallel (except where indicated
    # otherwise)
    # \- BuildAndArchiveArtifacts
    #    \- ArchiveReleaseArtifacts
    #       \- ArchiveDebugSymbols
    #       \- ArchiveFirmwareImages
    #       \- BuildAndArchiveAllImages
    #          (builds recovery image first, then launches functions below)
    #          \- BuildAndArchiveFactoryImages
    #          \- ArchiveStandaloneTarballs
    #             \- ArchiveStandaloneTarball
    #          \- ArchiveZipFiles
    #          \- ArchiveHWQual
    #       \- PushImage (blocks on BuildAndArchiveAllImages)
    #    \- ArchiveStrippedChrome
    #    \- BuildAndArchiveChromeSysroot
    #    \- ArchiveChromeEbuildEnv
    #    \- ArchiveImageScripts

    def ArchiveDebugSymbols():
      """Generate debug symbols and upload debug.tgz."""
      if config['archive_build_debug'] or config['vm_tests']:
        success = False
        try:
          commands.GenerateBreakpadSymbols(buildroot, board)
          success = True
        finally:
          self._BreakpadSymbolsGenerated(success)

        # Kick off the symbol upload process in the background.
        if config['upload_symbols']:
          self._upload_symbols_queue.put([])

        # Generate and upload tarball.
        filename = commands.GenerateDebugTarball(
            buildroot, board, archive_path, config['archive_build_debug'])
        self._release_upload_queue.put([filename])
      else:
        self._BreakpadSymbolsGenerated(False)

    def UploadSymbols():
      """Upload generated debug symbols."""
      if not debug:
        commands.UploadSymbols(buildroot, board, config['chromeos_official'])

    def BuildAndArchiveFactoryImages():
      """Build and archive the factory zip file.

      The factory zip file consists of the factory test image and the factory
      install image. Both are built here.
      """
      # Build factory test image and create symlink to it.
      factory_test_symlink = None
      if 'factory_test' in config['images']:
        alias = commands.BuildFactoryTestImage(buildroot, board, extra_env)
        factory_test_symlink = self.GetImageDirSymlink(alias)

      # Build factory install image and create a symlink to it.
      factory_install_symlink = None
      if 'factory_install' in config['images']:
        alias = commands.BuildFactoryInstallImage(buildroot, board, extra_env)
        factory_install_symlink = self.GetImageDirSymlink(alias)
        if config['factory_install_netboot']:
          commands.MakeNetboot(buildroot, board, factory_install_symlink)

      # Build and upload factory zip.
      if factory_install_symlink and factory_test_symlink:
        image_root = os.path.dirname(factory_install_symlink)
        filename = commands.BuildFactoryZip(
            buildroot, board, archive_path, image_root)
        self._release_upload_queue.put([filename])

    def ArchiveStandaloneTarball(image_file):
      """Build and upload a single tarball."""
      self._release_upload_queue.put([commands.BuildStandaloneImageTarball(
          archive_path, image_file)])

    def ArchiveStandaloneTarballs():
      """Build and upload standalone tarballs for each image."""
      if config['upload_standalone_images']:
        inputs = []
        for image_file in glob.glob(os.path.join(image_dir, '*.bin')):
          if os.path.basename(image_file) != 'chromiumos_qemu_image.bin':
            inputs.append([image_file])
        parallel.RunTasksInProcessPool(ArchiveStandaloneTarball, inputs)

    def ArchiveZipFiles():
      """Build and archive zip files.

      This includes:
        - image.zip (all images in one big zip file)
        - the au-generator.zip used for update payload generation.
      """
      # Zip up everything in the image directory.
      image_zip = commands.BuildImageZip(archive_path, image_dir)
      self._release_upload_queue.put([image_zip])

      # Archive au-generator.zip.
      filename = 'au-generator.zip'
      shutil.copy(os.path.join(image_dir, filename), archive_path)
      self._release_upload_queue.put([filename])

    def ArchiveHWQual():
      """Build and archive the HWQual images."""
      # TODO(petermayo): This logic needs to be exported from the BuildTargets
      # stage rather than copied/re-evaluated here.
      autotest_built = config['build_tests'] and self._options.tests and (
          config['upload_hw_test_artifacts'] or config['archive_build_debug'])

      if config['hwqual'] and autotest_built:
        # Build the full autotest tarball for hwqual image. We don't upload it,
        # as it's fairly large and only needed by the hwqual tarball.
        cros_build_lib.Info('Archiving full autotest tarball locally ...')
        tarball = commands.BuildFullAutotestTarball(self._build_root,
                                                    self._current_board,
                                                    image_dir)
        commands.ArchiveFile(tarball, archive_path)

        # Build hwqual image and upload to Google Storage.
        hwqual_name = 'chromeos-hwqual-%s-%s' % (board, self.version)
        filename = commands.ArchiveHWQual(buildroot, hwqual_name, archive_path,
                                          image_dir)
        self._release_upload_queue.put([filename])

    def ArchiveFirmwareImages():
      """Archive firmware images built from source if available."""
      archive = commands.BuildFirmwareArchive(buildroot, board, archive_path)
      if archive:
        self._release_upload_queue.put([archive])

    def BuildAndArchiveAllImages():
      # Generate the recovery image. To conserve loop devices, we try to only
      # run one instance of build_image at a time. TODO(davidjames): Move the
      # image generation out of the archive stage.

      # For recovery image to be generated correctly, BuildRecoveryImage must
      # run before BuildAndArchiveFactoryImages.
      if 'base' in config['images']:
        commands.BuildRecoveryImage(buildroot, board, image_dir, extra_env)
      self._recovery_image_status_queue.put(True)

      if config['images']:
        parallel.RunParallelSteps([BuildAndArchiveFactoryImages,
                                   ArchiveHWQual,
                                   ArchiveStandaloneTarballs,
                                   ArchiveZipFiles])

    def ArchiveImageScripts():
      """Archive tarball of generated image manipulation scripts."""
      target = os.path.join(archive_path, constants.IMAGE_SCRIPTS_TAR)
      files = glob.glob(os.path.join(image_dir, '*.sh'))
      files = [os.path.basename(f) for f in files]
      cros_build_lib.CreateTarball(target, image_dir, inputs=files)
      self._upload_queue.put([constants.IMAGE_SCRIPTS_TAR])

    def PushImage():
      # This helper script is only available on internal manifests currently.
      if not config['internal']:
        return

      # Now that all data has been generated, we can upload the final result to
      # the image server.
      # TODO: When we support branches fully, the friendly name of the branch
      # needs to be used with PushImages
      sign_types = []
      if config['name'].endswith('-%s' % cbuildbot_config.CONFIG_TYPE_FIRMWARE):
        sign_types += ['firmware']
      commands.PushImages(buildroot,
                          board=board,
                          branch_name='master',
                          archive_url=upload_url,
                          dryrun=debug or not config['push_image'],
                          profile=self._options.profile or config['profile'],
                          sign_types=sign_types)

    def ArchiveReleaseArtifacts():
      with self.ArtifactUploader(self._release_upload_queue, archive=False):
        steps = [ArchiveDebugSymbols, BuildAndArchiveAllImages,
                 ArchiveFirmwareImages]
        parallel.RunParallelSteps(steps)
      PushImage()

    def BuildAndArchiveArtifacts():
      # Run archiving steps in parallel.
      steps = [ArchiveReleaseArtifacts]
      if config['images']:
        steps.extend(
            [self.ArchiveStrippedChrome, self.BuildAndArchiveChromeSysroot,
             self.ArchiveChromeEbuildEnv, ArchiveImageScripts])

      with parallel.BackgroundTaskRunner(
          UploadSymbols, queue=self._upload_symbols_queue, processes=1):
        with self.ArtifactUploader(self._upload_queue, archive=False):
          parallel.RunParallelSteps(steps)

    def MarkAsLatest():
      # Update and upload LATEST file.
      filename = 'LATEST-%s' % self._target_manifest_branch
      latest_path = os.path.join(self.bot_archive_root, filename)
      osutils.WriteFile(latest_path, self.version, mode='w')
      commands.UploadArchivedFile(
          self.bot_archive_root, self._GetGSUtilArchiveDir(), filename, debug)

    try:
      BuildAndArchiveArtifacts()
      MarkAsLatest()
    finally:
      commands.RemoveOldArchives(self.bot_archive_root,
                                 self._options.max_archive_builds)

  def _HandleStageException(self, exception):
    # Tell the HWTestStage not to wait for artifacts to be uploaded
    # in case ArchiveStage throws an exception.
    self._recovery_image_status_queue.put(False)
    return super(ArchiveStage, self)._HandleStageException(exception)


class UploadPrebuiltsStage(BoardSpecificBuilderStage):
  """Uploads binaries generated by this build for developer use."""

  option_name = 'prebuilts'
  config_name = 'prebuilts'

  def __init__(self, options, build_config, board, archive_stage, suffix=None):
    super(UploadPrebuiltsStage, self).__init__(options, build_config, board,
                                               suffix=suffix)
    self._archive_stage = archive_stage

  def GenerateCommonArgs(self):
    """Generate common prebuilt arguments."""
    generated_args = []
    if self._options.debug:
      generated_args.append('--debug')

    profile = self._options.profile or self._build_config['profile']
    if profile:
      generated_args.extend(['--profile', profile])

    # Generate the version if we are a manifest_version build.
    if self._build_config['manifest_version']:
      assert self._archive_stage, 'Archive stage missing for versioned build.'
      version = self._archive_stage.GetVersion()
      generated_args.extend(['--set-version', version])

    if self._build_config['git_sync']:
      # Git sync should never be set for pfq type builds.
      assert not cbuildbot_config.IsPFQType(self._prebuilt_type)
      generated_args.extend(['--git-sync'])

    return generated_args

  @classmethod
  def _AddOptionsForSlave(cls, builder, board):
    """Inner helper method to add upload_prebuilts args for a slave builder.

    Returns:
      An array of options to add to upload_prebuilts array that allow a master
      to submit prebuilt conf modifications on behalf of a slave.
    """
    args = []
    builder_config = cbuildbot_config.config[builder]
    if builder_config['prebuilts']:
      for slave_board in builder_config['boards']:
        if builder_config['master'] and slave_board == board:
          # Ignore self.
          continue

        args.extend(['--slave-board', slave_board])
        slave_profile = builder_config['profile']
        if slave_profile:
          args.extend(['--slave-profile', slave_profile])

    return args

  def PerformStage(self):
    """Uploads prebuilts for master and slave builders."""
    prebuilt_type = self._prebuilt_type
    board = self._current_board
    binhosts = []

    # Whether we publish public prebuilts.
    public = (self._build_config['prebuilts'] == constants.PUBLIC)
    # Common args we generate for all types of builds.
    generated_args = self.GenerateCommonArgs()
    # Args we specifically add for public/private build types.
    public_args, private_args = [], []
    # Public / private builders.
    public_builders, private_builders = [], []

    # Distributed builders that use manifest-versions to sync with one another
    # share prebuilt logic by passing around versions.
    if cbuildbot_config.IsPFQType(prebuilt_type):
      # Public pfqs should upload host preflight prebuilts.
      if prebuilt_type != constants.CHROME_PFQ_TYPE:
        public_args.append('--sync-host')

      # Deduplicate against previous binhosts.
      binhosts.extend(self._GetPortageEnvVar(_PORTAGE_BINHOST, board).split())
      binhosts.extend(self._GetPortageEnvVar(_PORTAGE_BINHOST, None).split())
      for binhost in filter(None, binhosts):
        generated_args.extend(['--previous-binhost-url', binhost])

      if self._build_config['master'] and board == self._boards[-1]:
        # The master builder updates all the binhost conf files, and needs to
        # do so only once so as to ensure it doesn't try to update the same
        # file more than once. As multiple boards can be built on the same
        # builder, we arbitrarily decided to update the binhost conf files
        # when we run upload_prebuilts for the last board. The other boards
        # are treated as slave boards.
        generated_args.append('--sync-binhost-conf')
        for c in self._GetSlavesForMaster(self._build_config):
          if c['prebuilts'] == constants.PUBLIC:
            public_builders.append(c['name'])
            public_args.extend(self._AddOptionsForSlave(c['name'], board))
          elif c['prebuilts'] == constants.PRIVATE:
            private_builders.append(c['name'])
            private_args.extend(self._AddOptionsForSlave(c['name'], board))

    # Upload the public prebuilts, if any.
    if public_builders or public:
      public_board = board if public else None
      commands.UploadPrebuilts(
          category=prebuilt_type, chrome_rev=self._chrome_rev,
          private_bucket=False, buildroot=self._build_root,
          board=public_board, extra_args=generated_args + public_args)

    # Upload the private prebuilts, if any.
    if private_builders or not public:
      private_board = board if not public else None
      commands.UploadPrebuilts(
          category=prebuilt_type, chrome_rev=self._chrome_rev,
          private_bucket=True, buildroot=self._build_root, board=private_board,
          extra_args=generated_args + private_args)


class DevInstallerPrebuiltsStage(UploadPrebuiltsStage):
  config_name = 'dev_installer_prebuilts'

  def PerformStage(self):
    generated_args = self.GenerateCommonArgs()
    commands.UploadDevInstallerPrebuilts(
        binhost_bucket=self._build_config['binhost_bucket'],
        binhost_key=self._build_config['binhost_key'],
        binhost_base_url=self._build_config['binhost_base_url'],
        buildroot=self._build_root,
        board=self._current_board,
        extra_args=generated_args)


class PublishUprevChangesStage(NonHaltingBuilderStage):
  """Makes uprev changes from pfq live for developers."""

  def PerformStage(self):
    _, push_overlays = self._ExtractOverlays()
    if push_overlays:
      commands.UprevPush(self._build_root, push_overlays, self._options.debug)


class ReportStage(bs.BuilderStage):
  """Summarize all the builds."""

  _HTML_HEAD = """<html>
<head>
 <title>Archive Index: %(board)s / %(version)s</title>
</head>
<body>
<h2>Artifacts Index: %(board)s / %(version)s (%(config)s config)</h2>"""

  def __init__(self, options, build_config, archive_stages, version):
    bs.BuilderStage.__init__(self, options, build_config)
    self._archive_stages = archive_stages
    self._version = version if version else ''

  def PerformStage(self):
    acl = None if self._build_config['internal'] else 'public-read'
    archive_urls = {}

    for board_config, archive_stage in sorted(self._archive_stages.iteritems()):
      board = board_config.board
      head_data = {
          'board': board,
          'config': board_config.name,
          'version': archive_stage.version,
      }
      head = self._HTML_HEAD % head_data

      url = archive_stage.download_url
      path = archive_stage.archive_path
      upload_url = archive_stage.upload_url

      # Generate the final metadata before we look at the uploaded list.
      if results_lib.Results.BuildSucceededSoFar():
        final_status = 'passed'
      else:
        final_status = 'failed'
      archive_stage.RefreshMetadata('', final_status=final_status)

      # Generate the index page needed for public reading.
      uploaded = os.path.join(path, commands.UPLOADED_LIST_FILENAME)
      if not os.path.exists(uploaded):
        if (not self._build_config['compilecheck'] and
            not self._options.compilecheck):
          # UPLOADED doesn't exist. Normal if the board failed.
          logging.warning('board %s did not make it to the archive stage; '
                          'skipping', board)
        continue

      files = osutils.ReadFile(uploaded).splitlines() + [
          '.|Google Storage Index',
          '..|',
      ]
      index = os.path.join(path, 'index.html')
      commands.GenerateHtmlIndex(index, files, url_base=url, head=head)
      commands.UploadArchivedFile(path, upload_url, os.path.basename(index),
                                  debug=archive_stage.debug, acl=acl)
      archive_urls[board] = archive_stage.download_url + '/index.html'

    results_lib.Results.Report(sys.stdout, archive_urls=archive_urls,
...
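Every stage in the snippet above follows the same pattern: the constructor stores the build config dict, and PerformStage branches on plain self._build_config[...] lookups. A minimal sketch of a stage written against that pattern (the stage name is a hypothetical example; the 'vm_tests' and 'useflags' keys are taken from the snippet):

class ExampleConfigDrivenStage(bs.BuilderStage):
  """Hypothetical stage illustrating how stages consult self._build_config."""

  def PerformStage(self):
    # Config values are read with plain dict lookups; a missing key is
    # treated as a configuration error rather than silently defaulted.
    if self._build_config['vm_tests']:
      cros_build_lib.Info('This config runs VM tests: %s',
                          self._build_config['vm_tests'])
    # Empty/None values get normalized before use, as the real stages do.
    useflags = self._build_config['useflags'] or []
    cros_build_lib.Info('USE flags for this build: %s', ' '.join(useflags))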


builder.py

Source:builder.py Github


...
        _cfg = {"sources": list(self.src_master.keys()),
                "gene_root": ['entrez_gene', 'ensembl_gene']}
        self._build_config = _cfg
        return _cfg

    def load_build_config(self, build):
        '''Load build config from src_build collection.'''
        src_build = get_src_build()
        self.src_build = src_build
        _cfg = src_build.find_one({'_id': build})
        if _cfg:
            self._build_config = _cfg
        else:
            raise ValueError('Cannot find build config named "%s"' % build)
        return _cfg

    def log_src_build(self, dict):
        '''put logging dictionary into the corresponding doc in src_build collection.
           if build_config is not loaded from src_build, nothing will be logged.
        '''
        src_build = getattr(self, 'src_build', None)
        if src_build:
            _cfg = src_build.find_one({'_id': self._build_config['_id']})
            _cfg['build'][-1].update(dict)
            src_build.update({'_id': self._build_config['_id']},
                             {"$set": {'build': _cfg['build']}})

    def log_building_start(self):
        if self.merge_logging:
            # setup logging
            logfile = 'databuild_{}_{}.log'.format(
                'genedoc' + '_' + self._build_config['name'],
                time.strftime('%Y%m%d'))
            logfile = os.path.join(self.log_folder, logfile)
            setup_logfile(logfile)
        src_build = getattr(self, 'src_build', None)
        if src_build:
            # src_build.update({'_id': self._build_config['_id']}, {"$unset": {"build": ""}})
            d = {'status': 'building',
                 'started_at': datetime.now(),
                 'logfile': logfile,
                 'target_backend': self.target.name}
            if self.target.name == 'mongodb':
                d['target'] = self.target.target_collection.name
            elif self.target.name == 'es':
                d['target'] = self.target.target_esidxer.ES_INDEX_NAME
            logging.info(pformat(d))
            src_build.update({'_id': self._build_config['_id']},
                             {"$push": {'build': d}})
            _cfg = src_build.find_one({'_id': self._build_config['_id']})
            if len(_cfg['build']) > self.max_build_status:
                # remove the first build status record
                src_build.update({'_id': self._build_config['_id']},
                                 {"$pop": {'build': -1}})

    def _get_target_name(self):
        return 'genedoc_{}_{}_{}'.format(self._build_config['name'],
                                         get_timestamp(),
                                         get_random_string()).lower()

    def prepare_target(self, target_name=None):
        '''call self.update_backend() after validating self._build_config.'''
        if self.target.name == 'mongodb':
            _db = get_target_db()
            target_collection_name = target_name or self._get_target_name()
            self.target.target_collection = _db[target_collection_name]
            logging.info("Target: %s" % repr(target_collection_name))
        elif self.target.name == 'es':
            self.target.target_esidxer.ES_INDEX_NAME = target_name or self._get_target_name()
            self.target.target_esidxer._mapping = self.get_mapping()
        elif self.target.name == 'couchdb':
            self.target.db_name = target_name or ('genedoc' + '_' + self._build_config['name'])
        elif self.target.name == 'memory':
            self.target.target_name = target_name or ('genedoc' + '_' + self._build_config['name'])

    def get_src_master(self):
        src_master = get_src_master(self.src.client)
        self.src_master = dict([(src['_id'], src) for src in list(src_master.find())])

    def validate_src_collections(self, collection_list=None):
        if not collection_list:
            collection_list = set(self.src.collection_names())
            self.get_src_master()
            build_conf_src = self._build_config['sources']
        else:
            build_conf_src = collection_list
        logging.info("Sources: %s" % repr(build_conf_src))
        if self._build_config:
            for src in build_conf_src:
                assert src in self.src_master, '"%s" not found in "src_master"' % src
                assert src in collection_list, '"%s" not an existing collection in "%s"' % (src, self.src.name)
        else:
            raise ValueError('"build_config" cannot be empty.')

    def _load_entrez_geneid_d(self):
        self._entrez_geneid_d = loadobj(("entrez_gene__geneid_d.pyobj", self.src),
                                        mode='gridfs')

    def _load_ensembl2entrez_li(self):
        ensembl2entrez_li = loadobj(("ensembl_gene__2entrezgene_list.pyobj", self.src),
                                    mode='gridfs')
        # filter out those deprecated entrez gene ids
        logging.info(len(ensembl2entrez_li))
        ensembl2entrez_li = [(ensembl_id, self._entrez_geneid_d[int(entrez_id)])
                             for (ensembl_id, entrez_id) in ensembl2entrez_li
                             if int(entrez_id) in self._entrez_geneid_d]
        logging.info(len(ensembl2entrez_li))
        ensembl2entrez = list2dict(ensembl2entrez_li, 0)
        self._idmapping_d_cache['ensembl_gene'] = ensembl2entrez

    def _save_idmapping_gridfs(self):
        '''saving _idmapping_d_cache into gridfs.'''
        idmapping_gridfs_d = {}
        if self._idmapping_d_cache:
            for id_type in self._idmapping_d_cache:
                filename = 'tmp_idmapping_d_cache_' + id_type
                dump2gridfs(self._idmapping_d_cache[id_type], filename, self.src)
                idmapping_gridfs_d[id_type] = filename
        return idmapping_gridfs_d

    def make_genedoc_root(self):
        if not self._entrez_geneid_d:
            self._load_entrez_geneid_d()
        if 'ensembl_gene' in self._build_config['gene_root']:
            self._load_ensembl2entrez_li()
            ensembl2entrez = self._idmapping_d_cache['ensembl_gene']
        if "species" in self._build_config:
            _query = {'taxid': {'$in': self._build_config['species']}}
        elif "species_to_exclude" in self._build_config:
            _query = {'taxid': {'$nin': self._build_config['species_to_exclude']}}
        else:
            _query = None
        geneid_set = []
        species_set = set()
        if "entrez_gene" in self._build_config['gene_root']:
            for doc_li in doc_feeder(self.src['entrez_gene'], inbatch=True,
                                     step=self.step, query=_query):
                # target_collection.insert(doc_li, manipulate=False, check_keys=False)
                self.target.insert(doc_li)
                geneid_set.extend([doc['_id'] for doc in doc_li])
                species_set |= set([doc['taxid'] for doc in doc_li])
            cnt_total_entrez_genes = len(geneid_set)
            cnt_total_species = len(species_set)
            logging.info('# of entrez Gene IDs in total: %d' % cnt_total_entrez_genes)
            logging.info('# of species in total: %d' % cnt_total_species)
        if "ensembl_gene" in self._build_config['gene_root']:
            cnt_ensembl_only_genes = 0
            cnt_total_ensembl_genes = 0
            for doc_li in doc_feeder(self.src['ensembl_gene'], inbatch=True,
                                     step=self.step, query=_query):
                _doc_li = []
                for _doc in doc_li:
                    cnt_total_ensembl_genes += 1
                    ensembl_id = _doc['_id']
                    entrez_gene = ensembl2entrez.get(ensembl_id, None)
                    if entrez_gene is None:
                        # this is an Ensembl only gene
                        _doc_li.append(_doc)
                        cnt_ensembl_only_genes += 1
                        geneid_set.append(_doc['_id'])
                if _doc_li:
                    # target_collection.insert(_doc_li, manipulate=False, check_keys=False)
                    self.target.insert(_doc_li)
            cnt_matching_ensembl_genes = cnt_total_ensembl_genes - cnt_ensembl_only_genes
            logging.info('# of ensembl Gene IDs in total: %d' % cnt_total_ensembl_genes)
            logging.info('# of ensembl Gene IDs match entrez Gene IDs: %d' % cnt_matching_ensembl_genes)
            logging.info('# of ensembl Gene IDs DO NOT match entrez Gene IDs: %d' % cnt_ensembl_only_genes)
        geneid_set = set(geneid_set)
        logging.info('# of total Root Gene IDs: %d' % len(geneid_set))
        _stats = {'total_entrez_genes': cnt_total_entrez_genes,
                  'total_species': cnt_total_species,
                  'total_ensembl_genes': cnt_total_ensembl_genes,
                  'total_ensembl_genes_mapped_to_entrez': cnt_matching_ensembl_genes,
                  'total_ensembl_only_genes': cnt_ensembl_only_genes,
                  'total_genes': len(geneid_set)}
        self._stats = _stats
        self._src_version = self.get_src_version()
        self.log_src_build({'stats': _stats, 'src_version': self._src_version})
        return geneid_set

    def get_idmapping_d(self, src):
        if src in self._idmapping_d_cache:
            return self._idmapping_d_cache[src]
        else:
            self._load_ensembl2entrez_li()
            return self._idmapping_d_cache[src]
            # raise ValueError('cannot load "idmapping_d" for "%s"' % src)

    def merge(self, step=100000, restart_at=0, sources=None, target=None):
        t0 = time.time()
        self.validate_src_collections(sources)
        self.prepare_target(target_name=target)
        self.log_building_start()
        try:
            if self.using_ipython_cluster:
                if sources:
                    raise NotImplementedError(
                        "merge specific sources not supported when using parallel")
                self._merge_ipython_cluster(step=step)
            else:
                self._merge_local(step=step, restart_at=restart_at,
                                  src_collection_list=sources)

            if self.target.name == 'es':
                logging.info("Updating metadata...")
                self.update_mapping_meta()

            t1 = round(time.time() - t0, 0)
            t = timesofar(t0)
            self.log_src_build({'status': 'success',
                                'time': t,
                                'time_in_s': t1,
                                'timestamp': datetime.now()})
        finally:
            # do a simple validation here
            if getattr(self, '_stats', None):
                logging.info("Validating...")
                target_cnt = self.target.count()
                if target_cnt == self._stats['total_genes']:
                    logging.info("OK [total count={}]".format(target_cnt))
                else:
                    logging.info("Warning: total count of gene documents does not match [{}, should be {}]".format(target_cnt, self._stats['total_genes']))
            if self.merge_logging:
                sys.stdout.close()

    def merge_resume(self, build_config, at_collection, step=10000):
        '''resume a merging process after a failure.
             .merge_resume('mygene_allspecies', 'reporter')
        '''
        assert not self.using_ipython_cluster, "Abort. Can only resume merging in non-parallel mode."
        self.load_build_config(build_config)
        last_build = self._build_config['build'][-1]
        logging.info("Last build record:")
        logging.info(pformat(last_build))
        assert last_build['status'] == 'building', \
            "Abort. Last build does not need to be resumed."
        assert at_collection in self._build_config['sources'], \
            'Abort. Cannot resume merging from an unknown collection "{}"'.format(at_collection)
        assert last_build['target_backend'] == self.target.name, \
            'Abort. Re-initialized DataBuilder class using matching backend "{}"'.format(last_build['backend'])
        assert last_build.get('stats', None), \
            'Abort. Initial build stats are not available. You should restart the build from scratch.'
        self._stats = last_build['stats']
        if ask('Continue to resume merging from "{}"?'.format(at_collection)) == 'Y':
            # TODO: resume logging
            target_name = last_build['target']
            self.validate_src_collections()
            self.prepare_target(target_name=target_name)
            src_cnt = 0
            for collection in self._build_config['sources']:
                if collection in ['entrez_gene', 'ensembl_gene']:
                    continue
                src_cnt += 1
                if collection == at_collection:
                    break
            self._merge_local(step=step, restart_at=src_cnt)
            if self.target.name == 'es':
                logging.info("Updating metadata...")
                self.update_mapping_meta()
            self.log_src_build({'status': 'success',
                                'timestamp': datetime.now()})

    def _merge_ipython_cluster(self, step=100000):
        '''Do the merging on ipython cluster.'''
        from ipyparallel import Client, require
        from config import CLUSTER_CLIENT_JSON
        t0 = time.time()
        src_collection_list = [collection for collection in self._build_config['sources']
                               if collection not in ['entrez_gene', 'ensembl_gene']]
        self.target.drop()
        self.target.prepare()
        geneid_set = self.make_genedoc_root()
        idmapping_gridfs_d = self._save_idmapping_gridfs()
        logging.info(timesofar(t0))
        rc = Client(CLUSTER_CLIENT_JSON)
        lview = rc.load_balanced_view()
        logging.info("\t# nodes in use: {}".format(len(lview.targets or rc.ids)))
        lview.block = False
        kwargs = {}
        target_collection = self.target.target_collection
        kwargs['server'], kwargs['port'] = target_collection.database.client.address
        kwargs['src_db'] = self.src.name
        kwargs['target_db'] = target_collection.database.name
        kwargs['target_collection_name'] = target_collection.name
        kwargs['limit'] = step

        @require('pymongo', 'time', 'types')
        def worker(kwargs):
            server = kwargs['server']
            port = kwargs['port']
            src_db = kwargs['src_db']
            target_db = kwargs['target_db']
            target_collection_name = kwargs['target_collection_name']
            src_collection = kwargs['src_collection']
            skip = kwargs['skip']
            limit = kwargs['limit']

            def load_from_gridfs(filename, db):
                import gzip
                import pickle
                import gridfs
                fs = gridfs.GridFS(db)
                fobj = fs.get(filename)
                gzfobj = gzip.GzipFile(fileobj=fobj)
                try:
                    object = pickle.load(gzfobj)
                finally:
                    gzfobj.close()
                    fobj.close()
                return object

            def alwayslist(value):
                if value is None:
                    return []
                if isinstance(value, (list, tuple)):
                    return value
                else:
                    return [value]

            conn = pymongo.MongoClient(server, port)
            src = conn[src_db]
            target_collection = conn[target_db][target_collection_name]
            idmapping_gridfs_name = kwargs.get('idmapping_gridfs_name', None)
            if idmapping_gridfs_name:
                idmapping_d = load_from_gridfs(idmapping_gridfs_name, src)
            else:
                idmapping_d = None
            cur = src[src_collection].find(skip=skip, limit=limit, timeout=False)
            cur.batch_size(1000)
            try:
                for doc in cur:
                    _id = doc['_id']
                    if idmapping_d:
                        _id = idmapping_d.get(_id, None) or _id
                    # there could be cases that idmapping returns multiple entrez_gene id.
                    for __id in alwayslist(_id):
                        __id = str(__id)
                        doc.pop('_id', None)
                        doc.pop('taxid', None)
                        target_collection.update({'_id': __id}, doc,
                                                 manipulate=False, upsert=False)
                        # target_collection.update({'_id': __id}, {'$set': doc},
            finally:
                cur.close()

        t0 = time.time()
        task_list = []
        for src_collection in src_collection_list:
            _kwargs = copy.copy(kwargs)
            _kwargs['src_collection'] = src_collection
            id_type = self.src_master[src_collection].get('id_type', None)
            if id_type:
                idmapping_gridfs_name = idmapping_gridfs_d[id_type]
                _kwargs['idmapping_gridfs_name'] = idmapping_gridfs_name
            cnt = self.src[src_collection].count()
            for s in range(0, cnt, step):
                __kwargs = copy.copy(_kwargs)
                __kwargs['skip'] = s
                task_list.append(__kwargs)

        logging.info("\t# of tasks: {}".format(len(task_list)))
        logging.info("\tsubmitting...")
        job = lview.map_async(worker, task_list)
        logging.info("done.")
        job.wait_interactive()
        logging.info("\t# of results returned: {}".format(len(job.result())))
        logging.info("\ttotal time: {}".format(timesofar(t0)))
        if self.shutdown_ipengines_after_done:
            logging.info("\tshutting down all ipengine nodes...")
            lview.shutdown()
        logging.info('Done.')

    def _merge_local(self, step=100000, restart_at=0, src_collection_list=None):
        if restart_at == 0 and src_collection_list is None:
            self.target.drop()
            self.target.prepare()
            geneid_set = self.make_genedoc_root()
        else:
            if not self._entrez_geneid_d:
                self._load_entrez_geneid_d()
            # geneid_set = set([x['_id'] for x in target_collection.find(projection=[], manipulate=False)])
            geneid_set = set(self.target.get_id_list())
            logging.info('\t%s' % len(geneid_set))
        if not src_collection_list:
            src_collection_list = self._build_config['sources']
        src_cnt = 0
        for collection in src_collection_list:
            if collection in ['entrez_gene', 'ensembl_gene']:
                continue
            src_cnt += 1
            id_type = self.src_master[collection].get('id_type', None)
            flag_need_id_conversion = id_type is not None
            if flag_need_id_conversion:
                idmapping_d = self.get_idmapping_d(id_type)
            else:
                idmapping_d = None
            if restart_at <= src_cnt:
                if self.use_parallel:
                    self.doc_queue = []
                    self._merge_parallel_ipython(collection, geneid_set,
                                                 step=step, idmapping_d=idmapping_d)
                else:
                    self._merge_sequential(collection, geneid_set,
                                           step=step, idmapping_d=idmapping_d)
        self.target.finalize()

    def _merge_sequential(self, collection, geneid_set, step=100000, idmapping_d=None):
        for doc in doc_feeder(self.src[collection], step=step):
            _id = doc['_id']
            if idmapping_d:
                _id = idmapping_d.get(_id, None) or _id
            # there could be cases that idmapping returns multiple entrez_gene ids.
            for __id in alwayslist(_id):
                __id = str(__id)
                if __id in geneid_set:
                    doc.pop('_id', None)
                    doc.pop('taxid', None)
                    # target_collection.update({'_id': __id}, {'$set': doc},
                    #                          manipulate=False,
                    #                          upsert=False)  # ,safe=True)
                    self.target.update(__id, doc)

    def _merge_parallel(self, collection, geneid_set, step=100000, idmapping_d=None):
        from multiprocessing import Process, Queue
        NUMBER_OF_PROCESSES = 8
        input_queue = Queue()
        input_queue.conn_pool = []

        def worker(q, target):
            while True:
                doc = q.get()
                if doc == 'STOP':
                    break
                __id = doc.pop('_id')
                doc.pop('taxid', None)
                target.update(__id, doc)
                # target_collection.update({'_id': __id}, {'$set': doc},
                #                          manipulate=False,
                #                          upsert=False)  # ,safe=True)

        # Start worker processes
        for i in range(NUMBER_OF_PROCESSES):
            Process(target=worker, args=(input_queue, self.target)).start()

        for doc in doc_feeder(self.src[collection], step=step):
            _id = doc['_id']
            if idmapping_d:
                _id = idmapping_d.get(_id, None) or _id
            # there could be cases that idmapping returns multiple entrez_gene ids.
            for __id in alwayslist(_id):
                __id = str(__id)
                if __id in geneid_set:
                    doc['_id'] = __id
                    input_queue.put(doc)

        # Tell child processes to stop
        for i in range(NUMBER_OF_PROCESSES):
            input_queue.put('STOP')

    def _merge_parallel_ipython(self, collection, geneid_set, step=100000, idmapping_d=None):
        from IPython.parallel import Client, require
        rc = Client()
        dview = rc[:]
        # dview = rc.load_balanced_view()
        dview.block = False
        target_collection = self.target.target_collection
        dview['server'], dview['port'] = target_collection.database.client.address
        dview['database'] = target_collection.database.name
        dview['collection_name'] = target_collection.name

        def partition(lst, n):
            q, r = divmod(len(lst), n)
            indices = [q * i + min(i, r) for i in range(n + 1)]
            return [lst[indices[i]:indices[i + 1]] for i in range(n)]

        @require('pymongo', 'time')
        def worker(doc_li):
            conn = pymongo.MongoClient(server, port)
            target_collection = conn[database][collection_name]
            t0 = time.time()
            for doc in doc_li:
                __id = doc.pop('_id')
                doc.pop('taxid', None)
                target_collection.update({'_id': __id}, {'$set': doc},
                                         manipulate=False,
                                         upsert=False)  # ,safe=True)
            logging.info('Done. [%.1fs]' % (time.time() - t0))

        for doc in doc_feeder(self.src[collection], step=step):
            _id = doc['_id']
            if idmapping_d:
                _id = idmapping_d.get(_id, None) or _id
            # there could be cases that idmapping returns multiple entrez_gene ids.
            for __id in alwayslist(_id):
                __id = str(__id)
                if __id in geneid_set:
                    doc['_id'] = __id
                    self.doc_queue.append(doc)
                    if len(self.doc_queue) >= step:
                        # dview.scatter('doc_li', self.doc_queue)
                        # dview.apply_async(worker)
                        dview.map_async(worker, partition(self.doc_queue, len(rc.ids)))
                        self.doc_queue = []
                        logging.info("!")

    def get_src_version(self):
        src_dump = get_src_dump(self.src.client)
        src_version = {}
        for src in src_dump.find():
            version = src.get('release', src.get('timestamp', None))
            if version:
                src_version[src['_id']] = version
        return src_version

    def get_last_src_build_stats(self):
        src_build = getattr(self, 'src_build', None)
        if src_build:
            _cfg = src_build.find_one({'_id': self._build_config['_id']})
            if _cfg['build'][-1].get('status', None) == 'success' and \
               _cfg['build'][-1].get('stats', None):
                stats = _cfg['build'][-1]['stats']
                return stats

    def get_target_collection(self):
        '''get the latest target_collection from src_build record.'''
        src_build = getattr(self, 'src_build', None)
        if src_build:
            _cfg = src_build.find_one({'_id': self._build_config['_id']})
            if _cfg['build'][-1].get('status', None) == 'success' and \
               _cfg['build'][-1].get('target', None):
                target_collection = _cfg['build'][-1]['target']
                _db = get_target_db()
                target_collection = _db[target_collection]
                return target_collection

    def pick_target_collection(self, autoselect=True):
        '''print out a list of available target_collection, let the user pick one.'''
        target_db = get_target_db()
        target_collection_prefix = 'genedoc_' + self._build_config['name']
        target_collection_list = [target_db[name] for name in sorted(target_db.collection_names())
                                  if name.startswith(target_collection_prefix)]
        if target_collection_list:
            logging.info("Found {} target collections:".format(len(target_collection_list)))
            logging.info('\n'.join(['\t{0:<5}{1.name:<45}\t{2}'.format(
                str(i + 1) + ':', target, target.count())
                for (i, target) in enumerate(target_collection_list)]))
            logging.info('')
            while 1:
                if autoselect:
                    selected_idx = input("Pick one above [{}]:".format(len(target_collection_list)))
[{}]:".format(len(target_collection_list)))572 else:573 selected_idx = input("Pick one above:")574 if autoselect:575 selected_idx = selected_idx or len(target_collection_list)576 try:577 selected_idx = int(selected_idx)578 break579 except ValueError:580 continue581 return target_collection_list[selected_idx - 1]582 else:583 logging.info("Found no target collections.")584 def get_mapping(self, enable_timestamp=True):585 '''collect mapping data from data sources.586 This is for GeneDocESBackend only.587 '''588 mapping = {}589 src_master = get_src_master(self.src.client)590 for collection in self._build_config['sources']:591 meta = src_master.find_one({"_id" : collection})592 if 'mapping' in meta:593 mapping.update(meta['mapping'])594 else:595 logging.info('Warning: "%s" collection has no mapping data.' % collection)596 mapping = {"properties": mapping,597 "dynamic": False}598 if enable_timestamp:599 mapping['_timestamp'] = {600 "enabled": True,601 }602 #allow source Compression603 #Note: no need of source compression due to "Store Level Compression"604 #mapping['_source'] = {'compress': True,}605 # 'compress_threshold': '1kb'}606 return mapping607 def update_mapping_meta(self):608 '''updating _meta field of ES mapping data, including index stats, versions.609 This is for GeneDocESBackend only.610 '''611 _meta = {}612 src_version = self.get_src_version()613 if src_version:614 _meta['src_version'] = src_version615 if getattr(self, '_stats', None):616 _meta['stats'] = self._stats617 if _meta:618 self.target.target_esidxer.update_mapping_meta({'_meta': _meta})619 def validate(self, build_config='mygene_allspecies', n=10):620 '''Validate merged genedoc, currently for ES backend only.'''621 import random622 import itertools623 import pyes624 self.load_build_config(build_config)625 last_build = self._build_config['build'][-1]626 logging.info("Last build record:")627 logging.info(pformat(last_build))628 #assert last_build['target_backend'] == 'es', '"validate" currently works for "es" backend only'629 target_name = last_build['target']630 self.validate_src_collections()631 self.prepare_target(target_name=target_name)632 logging.info("Validating...")633 target_cnt = self.target.count()634 stats_cnt = last_build['stats']['total_genes']635 if target_cnt == stats_cnt:636 logging.info("OK [total count={}]".format(target_cnt))637 else:638 logging.info("Warning: total count of gene documents does not match [{}, should be {}]".format(target_cnt, stats_cnt))639 if n > 0:640 for src in self._build_config['sources']:641 logging.info("\nSrc: %s" % src)642 # if 'id_type' in self.src_master[src] and self.src_master[src]['id_type'] != 'entrez_gene':643 # print "skipped."644 # continue645 cnt = self.src[src].count()646 fdr1 = doc_feeder(self.src[src], step=10000, s=cnt - n)647 rand_s = random.randint(0, cnt - n)648 fdr2 = doc_feeder(self.src[src], step=n, s=rand_s, e=rand_s + n)649 _first_exception = True650 for doc in itertools.chain(fdr1, fdr2):651 _id = doc['_id']652 try:653 es_doc = self.target.get_from_id(_id)654 except pyes.exceptions.NotFoundException:655 if _first_exception:656 logging.info()657 _first_exception = False658 logging.info("%s not found." 
% _id)659 continue660 for k in doc:661 if src == 'entrez_homologene' and k == 'taxid':662 # there is occasionally known error for taxid in homologene data.663 continue664 assert es_doc.get(k, None) == doc[k], (_id, k, es_doc.get(k, None), doc[k])665 def build_index(self, use_parallel=True):666 target_collection = self.get_target_collection()667 if target_collection:668 es_idxer = ESIndexer(mapping=self.get_mapping())669 es_idxer.ES_INDEX_NAME = 'genedoc_' + self._build_config['name']670 es_idxer.step = 10000671 es_idxer.use_parallel = use_parallel672 #es_idxer.s = 609000673 #es_idxer.conn.indices.delete_index(es_idxer.ES_INDEX_NAME)674 es_idxer.create_index()675 es_idxer.delete_index_type(es_idxer.ES_INDEX_TYPE, noconfirm=True)676 es_idxer.build_index(target_collection, verbose=False)677 es_idxer.optimize()678 else:679 logging.info("Error: target collection is not ready yet or failed to build.")680 def build_index2(self, build_config='mygene_allspecies', last_build_idx=-1, use_parallel=False, es_host=None, es_index_name=None, noconfirm=False):681 """Build ES index from last successfully-merged mongodb collection.682 optional "es_host" argument can be used to specified another ES host, otherwise default ES_HOST.683 optional "es_index_name" argument can be used to pass an alternative index name, otherwise same as mongodb collection name684 """685 self.load_build_config(build_config)686 assert "build" in self._build_config, "Abort. No such build records for config %s" % build_config687 last_build = self._build_config['build'][last_build_idx]688 logging.info("Last build record:")689 logging.info(pformat(last_build))690 assert last_build['status'] == 'success', \691 "Abort. Last build did not success."692 assert last_build['target_backend'] == "mongodb", \693 'Abort. Last build need to be built using "mongodb" backend.'694 assert last_build.get('stats', None), \695 'Abort. Last build stats are not available.'696 self._stats = last_build['stats']697 assert last_build.get('target', None), \698 'Abort. Last build target_collection is not available.'699 # Get the source collection to build the ES index700 # IMPORTANT: the collection in last_build['target'] does not contain _timestamp field,701 # only the "genedoc_*_current" collection does. When "timestamp" is enabled702 # in mappings, last_build['target'] collection won't be indexed by ES correctly,703 # therefore, we use "genedoc_*_current" collection as the source here:704 #target_collection = last_build['target']705 target_collection = "genedoc_{}_current".format(build_config)706 _db = get_target_db()707 target_collection = _db[target_collection]708 logging.info("")709 logging.info('Source: %s' % target_collection.name)710 _mapping = self.get_mapping()711 _meta = {}712 src_version = self.get_src_version()713 if src_version:714 _meta['src_version'] = src_version715 if getattr(self, '_stats', None):716 _meta['stats'] = self._stats717 if 'timestamp' in last_build:718 _meta['timestamp'] = last_build['timestamp']719 if _meta:720 _mapping['_meta'] = _meta721 es_index_name = es_index_name or target_collection.name722 es_idxer = ESIndexer(mapping=_mapping,723 es_index_name=es_index_name,724 es_host=es_host,725 step=5000)726 if build_config == 'mygene_allspecies':727 es_idxer.number_of_shards = 10 # default 5728 es_idxer.check()729 if noconfirm or ask("Continue to build ES index?") == 'Y':730 es_idxer.use_parallel = use_parallel731 #es_idxer.s = 609000732 if es_idxer.exists_index(es_idxer.ES_INDEX_NAME):733 if noconfirm or ask('Index "{}" exists. 
Delete?'.format(es_idxer.ES_INDEX_NAME)) == 'Y':734 es_idxer.conn.indices.delete(es_idxer.ES_INDEX_NAME)735 else:736 logging.info("Abort.")737 return738 es_idxer.create_index()739 #es_idxer.delete_index_type(es_idxer.ES_INDEX_TYPE, noconfirm=True)740 es_idxer.build_index(target_collection, verbose=False)741 # time.sleep(10) # pausing 10 second here742 # if es_idxer.wait_till_all_shards_ready():743 # print "Optimizing...", es_idxer.optimize()744 def sync_index(self, use_parallel=True):745 from utils import diff746 sync_src = self.get_target_collection()747 es_idxer = ESIndexer(self.get_mapping())748 es_idxer.ES_INDEX_NAME = sync_src.target_collection.name749 es_idxer.step = 10000750 es_idxer.use_parallel = use_parallel751 sync_target = databuild.backend.GeneDocESBackend(es_idxer)752 changes = diff.diff_collections(sync_src, sync_target)753 return changes754def main():755 if len(sys.argv) > 1:756 config = sys.argv[1]757 else:758 config = 'mygene_allspecies'759 use_parallel = '-p' in sys.argv760 sources = None # will build all sources761 target = None # will generate a new collection name762 # "target_col:src_col1,src_col2" will specifically merge src_col1763 # and src_col2 into existing target_col (instead of merging everything)764 if not use_parallel and len(sys.argv) > 2:765 target,tmp = sys.argv[2].split(":")766 sources = tmp.split(",")767 t0 = time.time()768 bdr = DataBuilder(backend='mongodb')769 bdr.load_build_config(config)770 bdr.using_ipython_cluster = use_parallel771 bdr.merge(sources=sources,target=target)772 logging.info("Finished. %s" % timesofar(t0))773if __name__ == '__main__':...
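Nearly every method above keys off the `_build_config` dict that `load_build_config()` loads: `_build_config['sources']` selects which collections `_merge_local()` walks, `_build_config['name']` prefixes the `genedoc_*` target and index names, and `_build_config['build']` records the merge history. Here is a minimal sketch of driving `DataBuilder` the same way `main()` does, using only the calls shown in the snippet; the config name `'mygene'` is an illustrative assumption, not a required value.

# A minimal sketch, assuming a mongodb backend and an existing 'mygene'
# build-config record (the name is hypothetical, for illustration only).
import time

bdr = DataBuilder(backend='mongodb')
bdr.load_build_config('mygene')   # populates bdr._build_config

# The loaded dict drives everything else:
logging.info(bdr._build_config['name'])      # prefix for 'genedoc_*' targets
logging.info(bdr._build_config['sources'])   # collections the merge will walk

t0 = time.time()
# sources=None merges every collection in _build_config['sources'];
# target=None lets the builder generate a new target collection name.
bdr.merge(sources=None, target=None)
logging.info("Finished. %s" % timesofar(t0))

Passing an explicit `sources` list (as `main()` does when given a `target_col:src_col1,src_col2` argument) restricts the merge to those collections instead.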


builderstage.py

Source:builderstage.py Github


# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the base class for the stages that a builder runs."""
import copy
import os
import re
import sys
import time
import traceback
# We import mox so that we can identify mox exceptions and pass them through
# in our exception handling code.
try:
  import mox
except ImportError:
  mox = None
from chromite.buildbot import cbuildbot_config
from chromite.buildbot import cbuildbot_results as results_lib
from chromite.buildbot import portage_utilities
from chromite.buildbot import validation_pool
from chromite.lib import cros_build_lib


class BuilderStage(object):
  """Parent class for stages to be performed by a builder."""
  name_stage_re = re.compile(r'(\w+)Stage')

  # TODO(sosa): Remove these once we have a SEND/RECEIVE IPC mechanism
  # implemented.
  overlays = None
  push_overlays = None

  # Class variable that stores the branch to build and test
  _target_manifest_branch = None

  # Class should set this if they have a corresponding no<stage> option that
  # skips their stage.
  option_name = None

  # Class should set this if they have a corresponding setting in
  # self._build_config that skips their stage.
  config_name = None

  @staticmethod
  def SetManifestBranch(branch):
    BuilderStage._target_manifest_branch = branch

  @classmethod
  def StageNamePrefix(cls):
    return cls.name_stage_re.match(cls.__name__).group(1)

  def __init__(self, options, build_config, suffix=None):
    self._options = options
    self._bot_id = build_config['name']
    if not self._options.archive_base and self._options.remote_trybot:
      self._bot_id = 'trybot-' + self._bot_id
    self._build_config = copy.deepcopy(build_config)
    self.name = self.StageNamePrefix()
    if suffix:
      self.name += suffix
    self._boards = self._build_config['boards']
    self._build_root = os.path.abspath(self._options.buildroot)
    self._prebuilt_type = None
    if self._options.prebuilts and self._build_config['prebuilts']:
      self._prebuilt_type = self._build_config['build_type']

    # Determine correct chrome_rev.
    self._chrome_rev = self._build_config['chrome_rev']
    if self._options.chrome_rev:
      self._chrome_rev = self._options.chrome_rev

  def ConstructDashboardURL(self, stage=None):
    """Return the dashboard URL.

    This is the direct link to buildbot logs as seen in build.chromium.org.

    Args:
      stage: Link to a specific |stage|, otherwise the general buildbot log.

    Returns:
      The fully formed URL.
    """
    return validation_pool.ValidationPool.ConstructDashboardURL(
        self._build_config['overlays'], self._options.remote_trybot,
        self._build_config['name'], self._options.buildnumber, stage=stage)

  def _ExtractOverlays(self):
    """Extracts list of overlays into class."""
    overlays = portage_utilities.FindOverlays(
        self._build_config['overlays'], buildroot=self._build_root)
    push_overlays = portage_utilities.FindOverlays(
        self._build_config['push_overlays'], buildroot=self._build_root)

    # Sanity checks.
    # We cannot push to overlays that we don't rev.
    assert set(push_overlays).issubset(set(overlays))
    # Either has to be a master or not have any push overlays.
    assert self._build_config['master'] or not push_overlays

    return overlays, push_overlays

  def _Print(self, msg):
    """Prints a msg to stderr."""
    sys.stdout.flush()
    print >> sys.stderr, msg
    sys.stderr.flush()

  def _PrintLoudly(self, msg):
    """Prints a msg loudly."""
    border_line = '*' * 60
    edge = '*' * 2

    sys.stdout.flush()
    print >> sys.stderr, border_line

    msg_lines = msg.split('\n')

    # If the last line is whitespace only drop it.
    if not msg_lines[-1].rstrip():
      del msg_lines[-1]

    for msg_line in msg_lines:
      print >> sys.stderr, '%s %s' % (edge, msg_line)

    print >> sys.stderr, border_line
    sys.stderr.flush()

  def _GetPortageEnvVar(self, envvar, board):
    """Get a portage environment variable for the configuration's board.

    Args:
      envvar: The environment variable to get. E.g. 'PORTAGE_BINHOST'.

    Returns:
      The value of the environment variable, as a string. If no such variable
      can be found, return the empty string.
    """
    cwd = os.path.join(self._build_root, 'src', 'scripts')
    if board:
      portageq = 'portageq-%s' % board
    else:
      portageq = 'portageq'
    binhost = cros_build_lib.RunCommand(
        [portageq, 'envvar', envvar], cwd=cwd, redirect_stdout=True,
        enter_chroot=True, error_code_ok=True)
    return binhost.output.rstrip('\n')

  @staticmethod
  def _GetSlavesForMaster(build_config, configs=None):
    """Gets the important builds corresponding to this master.

    Returns:
      A list of the slaves for this builder.
    """
    if configs is None:
      configs = cbuildbot_config.config
    builders = []
    assert build_config['manifest_version']
    assert build_config['master']
    for config in configs.itervalues():
      if (config['important'] and
          config['manifest_version'] and
          config['build_type'] == build_config['build_type'] and
          config['chrome_rev'] == build_config['chrome_rev'] and
          config['branch'] == build_config['branch']):
        builders.append(config)
    return builders

  def _Begin(self):
    """Can be overridden. Called before a stage is performed."""
    # Tell the buildbot we are starting a new step for the waterfall
    self._Print('\n@@@BUILD_STEP %s@@@\n' % self.name)
    self._PrintLoudly('Start Stage %s - %s\n\n%s' % (
        self.name, time.strftime('%H:%M:%S'), self.__doc__))

  def _Finish(self):
    """Can be overridden. Called after a stage has been performed."""
    self._PrintLoudly('Finished Stage %s - %s' %
                      (self.name, time.strftime('%H:%M:%S')))

  def PerformStage(self):
    """Subclassed stages must override this function to perform the work they
    want done.
    """

  def _HandleExceptionAsSuccess(self, _exception):
    """Use instead of HandleStageException to ignore an exception."""
    return results_lib.Results.SUCCESS, None

  @staticmethod
  def _StringifyException(exception):
    """Convert an exception into a string.

    This can only be called as part of an except block.
    """
    if isinstance(exception, results_lib.StepFailure):
      return str(exception)
    else:
      return traceback.format_exc()

  def _HandleExceptionAsWarning(self, exception):
    """Use instead of HandleStageException to treat an exception as a warning.

    This is used by the ForgivingBuilderStage to treat any exceptions as
    warnings instead of stage failures.
    """
    cros_build_lib.PrintBuildbotStepWarnings()
    cros_build_lib.Warning(self._StringifyException(exception))
    return results_lib.Results.FORGIVEN, None

  def _HandleStageException(self, exception):
    """Called when PerformStage throws an exception. Can be overridden.

    Should return result, description. Description should be None if result
    is not an exception.
    """
    # Tell the user about the exception, and record it
    description = self._StringifyException(exception)
    cros_build_lib.PrintBuildbotStepFailure()
    cros_build_lib.Error(description)
    return exception, description

  def HandleSkip(self):
    """Run if the stage is skipped."""
    pass

  def Run(self):
    """Have the builder execute the stage."""
    if (self.option_name and not getattr(self._options, self.option_name) or
        self.config_name and not self._build_config[self.config_name]):
      self._PrintLoudly('Not running Stage %s' % self.name)
      self.HandleSkip()
      results_lib.Results.Record(self.name, results_lib.Results.SKIPPED)
      return

    record = results_lib.Results.PreviouslyCompletedRecord(self.name)
    if record:
      # Success is stored in the results log for a stage that completed
      # successfully in a previous run.
      self._PrintLoudly('Stage %s processed previously' % self.name)
      self.HandleSkip()
      results_lib.Results.Record(self.name, results_lib.Results.SUCCESS, None,
                                 float(record[2]))
      return

    start_time = time.time()

    # Set default values
    result = results_lib.Results.SUCCESS
    description = None

    sys.stdout.flush()
    sys.stderr.flush()
    self._Begin()
    try:
      self.PerformStage()
    except SystemExit as e:
      if e.code != 0:
        result, description = self._HandleStageException(e)
      raise
    except Exception as e:
      if mox is not None and isinstance(e, mox.Error):
        raise
      # Tell the build bot this step failed for the waterfall
      result, description = self._HandleStageException(e)
      if result not in (results_lib.Results.FORGIVEN,
                        results_lib.Results.SUCCESS):
        raise results_lib.StepFailure()
    except BaseException as e:
      result, description = self._HandleStageException(e)
      raise
    finally:
      elapsed_time = time.time() - start_time
      results_lib.Results.Record(self.name, result, description,
                                 time=elapsed_time)
      self._Finish()
      sys.stdout.flush()
      ...
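The interplay between `option_name`, `config_name`, and `Run()` above is what makes stages skippable per builder: `Run()` consults `self._options` and `self._build_config` before ever calling `PerformStage()`, and `__init__` deep-copies `build_config` so each stage reads its own snapshot. A minimal sketch of a custom stage follows; the stage name and the `'tests'`/`'unittests'` keys are hypothetical, chosen only to illustrate the hooks.

# Hypothetical stage; the option and config keys below are illustrative
# assumptions, not real cbuildbot settings.
class UnitTestStage(BuilderStage):
  """Runs unit tests for each configured board."""

  option_name = 'tests'      # Run() skips this stage when options.tests is falsy.
  config_name = 'unittests'  # ...or when build_config['unittests'] is falsy.

  def PerformStage(self):
    # _build_config was deep-copied in __init__, so reading (or even
    # mutating) it here cannot leak into other stages.
    for board in self._build_config['boards']:
      self._Print('Running unit tests for board %s' % board)

Because `Run()` wraps `PerformStage()` in the try/except shown above, a failure in such a stage is stringified by `_HandleStageException`, recorded via `results_lib.Results.Record`, and reported on the waterfall without the stage needing any error handling of its own.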


