Scheduler.py
# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

from __future__ import division, print_function, unicode_literals

from collections import deque
import gc
import gzip
import logging
import signal
import sys
import textwrap
import time
import warnings
import weakref
import zlib

import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.elog.messages import eerror
from portage.localization import _
from portage.output import colorize, create_color_func, red
bad = create_color_func("BAD")
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
	_prepare_self_update)
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs

import _emerge
from _emerge.BinpkgFetcher import BinpkgFetcher
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
from _emerge.BinpkgVerifier import BinpkgVerifier
from _emerge.Blocker import Blocker
from _emerge.BlockerDB import BlockerDB
from _emerge.clear_caches import clear_caches
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.create_world_atom import create_world_atom
from _emerge.DepPriority import DepPriority
from _emerge.depgraph import depgraph, resume_depgraph
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.FakeVartree import FakeVartree
from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
from _emerge.MergeListItem import MergeListItem
from _emerge.Package import Package
from _emerge.PackageMerge import PackageMerge
from _emerge.PollScheduler import PollScheduler
from _emerge.SequentialTaskQueue import SequentialTaskQueue

if sys.hexversion >= 0x3000000:
	basestring = str

# enums
FAILURE = 1


class Scheduler(PollScheduler):

	# max time between loadavg checks (seconds)
	_loadavg_latency = 30

	# max time between display status updates (seconds)
	_max_display_latency = 3

	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	_opts_no_self_update = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	class _iface_class(SchedulerInterface):
		__slots__ = ("fetch",
			"scheduleSetup", "scheduleUnpack")

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	_task_queues_class = slot_dict_class(
		("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")

	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")
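
	# The slots of the option-holder classes above and below mirror emerge
	# option names: __init__ derives the flag for each slot by replacing
	# underscores with hyphens, so e.g. the "fetch_all_uri" slot is
	# populated from the "--fetch-all-uri" entry in myopts.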
	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg",
			"postinst_failure", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")

		def __init__(self, root, allocate, deallocate):
			self._root = root
			self._allocate = allocate
			self._deallocate = deallocate

		def allocate(self):
			return self._allocate(self._root)

		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)

	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist=None, favorites=None, graph_config=None):
		PollScheduler.__init__(self, main=True)

		if mergelist is not None:
			warnings.warn("The mergelist parameter of the " + \
				"_emerge.Scheduler constructor is now unused. Use " + \
				"the graph_config parameter instead.",
				DeprecationWarning, stacklevel=2)

		self.settings = settings
		self.target_root = settings["EROOT"]
		self.trees = trees
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites, allow_repo=True)
		self._build_opts = self._build_opts_class()

		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
		self._build_opts.buildpkg_exclude = InternalPackageSet( \
			initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
			allow_wildcard=True, allow_repo=True)
		if "mirror" in self.settings.features:
			self._build_opts.fetch_all_uri = True

		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self.curval = 0
		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified. For example, see
		# bug #256616.
		self._merge_wait_queue = deque()
		# Holds merges that have been transferred from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
			max_jobs = 1
		self._set_max_jobs(max_jobs)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.pkgsettings = {}
		self._config_pool = {}
		for root in self.trees:
			self._config_pool[root] = []

		self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
			'emerge-fetch.log')
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			self._event_loop,
			is_background=self._is_background,
			fetch=fetch_iface,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack)

		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._jobs = 0
		self._running_tasks = {}
		self._completed_tasks = set()
		self._main_exit = None
		self._main_loadavg_handle = None

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		self._init_graph(graph_config)
		merge_count = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 5
		self._previous_job_start_time = None
		self._job_delay_timeout_id = None

		# The load average takes some time to respond after
		# a SIGSTOP/SIGCONT cycle, so delay scheduling for some
		# time after SIGCONT is received.
		self._sigcont_delay = 5
		self._sigcont_time = None

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif merge_count > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
			try:
				open(self._fetch_log, 'w').close()
			except EnvironmentError:
				pass

		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		if portage_match:
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)

	def _handle_self_update(self):

		if self._opts_no_self_update.intersection(self.myopts):
			return os.EX_OK

		for x in self._mergelist:
			if not isinstance(x, Package):
				continue
			if x.operation != "merge":
				continue
			if x.root != self._running_root.root:
				continue
			if not portage.dep.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [x]):
				continue
			rval = _check_temp_dir(self.settings)
			if rval != os.EX_OK:
				return rval
			_prepare_self_update(self.settings)
			break

		return os.EX_OK

	def _terminate_tasks(self):
		self._status_display.quiet = True
		for task in list(self._running_tasks.values()):
			if task.isAlive():
				# This task should keep the main loop running until
				# it has had an opportunity to clean up after itself.
				# Rely on its exit hook to remove it from
				# self._running_tasks when it has finished cleaning up.
				task.cancel()
			else:
				# This task has been waiting to be started in one of
				# self._task_queues which are all cleared below. It
				# will never be started, so purge it from
				# self._running_tasks so that it won't keep the main
				# loop running.
				del self._running_tasks[id(task)]

		for q in self._task_queues.values():
			q.clear()

	def _init_graph(self, graph_config):
		"""
		Initialize structures used for dependency calculations
		involving currently installed packages.
		"""
		self._set_graph_config(graph_config)
		self._blocker_db = {}
		depgraph_params = create_depgraph_params(self.myopts, None)
		dynamic_deps = "dynamic_deps" in depgraph_params
		ignore_built_slot_operator_deps = self.myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for root in self.trees:
			if graph_config is None:
				fake_vartree = FakeVartree(self.trees[root]["root_config"],
					pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
				fake_vartree.sync()
			else:
				fake_vartree = graph_config.trees[root]['vartree']
			self._blocker_db[root] = BlockerDB(fake_vartree)

	def _destroy_graph(self):
		"""
		Use this to free memory at the beginning of _calc_resume_list().
		After _calc_resume_list(), the _init_graph() method
		must be called in order to re-generate the structures that
		this method destroys.
		"""
		self._blocker_db = None
		self._set_graph_config(None)
		gc.collect()

	def _set_max_jobs(self, max_jobs):
		self._max_jobs = max_jobs
		self._task_queues.jobs.max_jobs = max_jobs
		if "parallel-install" in self.settings.features:
			self._task_queues.merge.max_jobs = max_jobs
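
	# For example, --jobs=2 or --quiet normally enables background mode
	# below, unless an option from _opts_no_background (--pretend,
	# --fetchonly, --fetch-all-uri) is present or an interactive package
	# appears in the merge list.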
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@return: True if background mode is enabled, False otherwise.
		"""
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts \
			or self.myopts.get("--quiet-build") == "y") and \
			not bool(self._opts_no_background.intersection(self.myopts))

		if background:
			interactive_tasks = self._get_interactive_tasks()
			if interactive_tasks:
				background = False
				writemsg_level(">>> Sending package output to stdio due " + \
					"to interactive package(s):\n",
					level=logging.INFO, noiselevel=-1)
				msg = [""]
				for pkg in interactive_tasks:
					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
					if pkg.root_config.settings["ROOT"] != "/":
						pkg_str += " for " + pkg.root
					msg.append(pkg_str)
				msg.append("")
				writemsg_level("".join("%s\n" % (l,) for l in msg),
					level=logging.INFO, noiselevel=-1)
				if self._max_jobs is True or self._max_jobs > 1:
					self._set_max_jobs(1)
					writemsg_level(">>> Setting --jobs=1 due " + \
						"to the above interactive package(s)\n",
						level=logging.INFO, noiselevel=-1)
					writemsg_level(">>> In order to temporarily mask " + \
						"interactive updates, you may\n" + \
						">>> specify --accept-properties=-interactive\n",
						level=logging.INFO, noiselevel=-1)
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet

		return background

	def _get_interactive_tasks(self):
		interactive_tasks = []
		for task in self._mergelist:
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
				continue
			if 'interactive' in task.properties:
				interactive_tasks.append(task)
		return interactive_tasks

	def _set_graph_config(self, graph_config):

		if graph_config is None:
			self._graph_config = None
			self._pkg_cache = {}
			self._digraph = None
			self._mergelist = []
			self._world_atoms = None
			self._deep_system_deps.clear()
			return

		self._graph_config = graph_config
		self._pkg_cache = graph_config.pkg_cache
		self._digraph = graph_config.graph
		self._mergelist = graph_config.mergelist

		# Generate world atoms while the event loop is not running,
		# since otherwise portdbapi match calls in the create_world_atom
		# function could trigger event loop recursion.
		self._world_atoms = {}
		for pkg in self._mergelist:
			if getattr(pkg, 'operation', None) != 'merge':
				continue
			atom = create_world_atom(pkg, self._args_set,
				pkg.root_config, before_install=True)
			if atom is not None:
				self._world_atoms[pkg] = atom

		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# save some memory
			self._digraph = None
			graph_config.graph = None
			graph_config.pkg_cache.clear()
			self._deep_system_deps.clear()
			for pkg in self._mergelist:
				self._pkg_cache[pkg] = pkg
			return

		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
		if '--debug' in self.myopts:
			writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
			self._digraph.debug_print()
			writemsg("\n", noiselevel=-1)

	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		NOTE: This can only find deep system deps if the system set has been
		added to the graph and traversed deeply (the depgraph "complete"
		parameter will do this, triggered by emerge --complete-graph option).
		"""
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])

	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		while True:
			for node in graph.root_nodes():
				if not isinstance(node, Package) or \
					(node.installed and node.operation == "nomerge") or \
					node.onlydeps or \
					node in completed_tasks:
					removed_nodes.add(node)
			if removed_nodes:
				graph.difference_update(removed_nodes)
			if not removed_nodes:
				break
			removed_nodes.clear()

	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of its build dir locks from
		the main process.
		"""
		cpv_map = {}
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
				continue
			if pkg.installed:
				continue
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
				continue
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
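
	# Illustrative scenario (hypothetical package name): if sys-apps/foo-1.0
	# appears in the merge list once for / and once for another ROOT, the
	# buildtime edges added above serialize those builds, since both would
	# otherwise use the same build directory under PORTAGE_TMPDIR.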
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		status = 1

		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			if pargs:
				self.status = pargs[0]

	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher, in order to control the number of concurrent
		fetchers. If self._max_jobs is greater than 1 then the fetch
		queue is bypassed and the fetcher is started immediately,
		otherwise it is added to the front of the parallel-fetch queue.
		NOTE: The parallel-fetch queue is currently used to serialize
		access to the parallel-fetch log, so changes in the log handling
		would be required before it would be possible to enable
		concurrent fetching within the parallel-fetch queue.
		"""
		if self._max_jobs > 1:
			fetcher.start()
		else:
			self._task_queues.fetch.addFront(fetcher)

	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		if self._task_queues.merge.max_jobs > 1 and \
			"ebuild-locks" in self.settings.features:
			# Use a separate queue for ebuild-locks when the merge
			# queue allows more than 1 job (due to parallel-install),
			# since the portage.locks module does not behave as desired
			# if we try to lock the same file multiple times
			# concurrently from the same process.
			self._task_queues.ebuild_locks.add(setup_phase)
		else:
			self._task_queues.merge.add(setup_phase)
		self._schedule()

	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)

	def _find_blockers(self, new_pkg):
		"""
		Returns a callable.
		"""
		def get_blockers():
			return self._find_blockers_impl(new_pkg)
		return get_blockers

	def _find_blockers_impl(self, new_pkg):
		if self._opts_ignore_blockers.intersection(self.myopts):
			return None

		blocker_db = self._blocker_db[new_pkg.root]

		blocked_pkgs = []
		for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
				continue
			if new_pkg.cpv == blocking_pkg.cpv:
				continue
			blocked_pkgs.append(blocking_pkg)

		return blocked_pkgs
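
	# Installed packages that share new_pkg's slot atom or exact cpv are
	# skipped above because they are about to be replaced by new_pkg, so
	# they are not reported as blockers.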
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digest or FEATURES=digest.
		In order to avoid interference, this must be done before parallel
		tasks are started.
		"""
		digest = '--digest' in self.myopts
		if not digest:
			for pkgsettings in self.pkgsettings.values():
				if pkgsettings.mycpv is not None:
					# ensure that we are using global features
					# settings rather than those from package.env
					pkgsettings.reset()
				if 'digest' in pkgsettings.features:
					digest = True
					break

		if not digest:
			return os.EX_OK

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
				continue
			pkgsettings = self.pkgsettings[x.root]
			if pkgsettings.mycpv is not None:
				# ensure that we are using global features
				# settings rather than those from package.env
				pkgsettings.reset()
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
				continue
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
			if ebuild_path is None:
				raise AssertionError("ebuild not found for '%s'" % x.cpv)
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not digestgen(mysettings=pkgsettings, myportdb=portdb):
				writemsg_level(
					"!!! Unable to generate manifest for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return FAILURE

		return os.EX_OK

	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
			return os.EX_OK

		shown_verifying_msg = False
		quiet_settings = {}
		for myroot, pkgsettings in self.pkgsettings.items():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config
			del quiet_config

		failures = 0

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
				continue

			if x.operation == "uninstall":
				continue

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
			if ebuild_path is None:
				raise AssertionError("ebuild not found for '%s'" % x.cpv)
			quiet_config["O"] = os.path.dirname(ebuild_path)
			if not digestcheck([], quiet_config, strict=True):
				failures |= 1

		if failures:
			return FAILURE
		return os.EX_OK
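
	# Prefetchers created below are tracked in the _prefetchers
	# WeakValueDictionary (see __init__); _task() later pops the entry for
	# its package and discards any prefetcher that never got a chance to
	# start.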
	def _add_prefetchers(self):

		if not self._parallel_fetch:
			return

		if self._parallel_fetch:

			prefetchers = self._prefetchers

			for pkg in self._mergelist:
				# mergelist can contain solved Blocker instances
				if not isinstance(pkg, Package) or pkg.operation == "uninstall":
					continue
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					# This will start the first prefetcher immediately, so that
					# self._task() won't discard it. This avoids a case where
					# the first prefetcher is discarded, causing the second
					# prefetcher to occupy the fetch queue before the first
					# fetcher has an opportunity to execute.
					prefetchers[pkg] = prefetcher
					self._task_queues.fetch.add(prefetcher)

	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		prefetcher = None

		if not isinstance(pkg, Package):
			pass

		elif pkg.type_name == "ebuild":
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
				logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)

		return prefetcher

	def _run_pkg_pretend(self):
		"""
		Since pkg_pretend output may be important, this method sends all
		output directly to stdout (regardless of options like --quiet or
		--jobs).
		"""

		failures = 0

		# Use a local EventLoop instance here, since we don't
		# want tasks here to trigger the usual Scheduler callbacks
		# that handle job scheduling and status display.
		sched_iface = SchedulerInterface(EventLoop(main=False))

		for x in self._mergelist:
			if not isinstance(x, Package):
				continue

			if x.operation == "uninstall":
				continue

			if x.eapi in ("0", "1", "2", "3"):
				continue

			if "pretend" not in x.defined_phases:
				continue

			out_str = ">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
			portage.util.writemsg_stdout(out_str, noiselevel=-1)

			root_config = x.root_config
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(x)

			# setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
			# have to validate it for each package
			rval = _check_temp_dir(settings)
			if rval != os.EX_OK:
				return rval

			build_dir_path = os.path.join(
				os.path.realpath(settings["PORTAGE_TMPDIR"]),
				"portage", x.category, x.pf)
			existing_builddir = os.path.isdir(build_dir_path)
			settings["PORTAGE_BUILDDIR"] = build_dir_path
			build_dir = EbuildBuildDir(scheduler=sched_iface,
				settings=settings)
			sched_iface.run_until_complete(build_dir.async_lock())
			current_task = None

			try:

				# Clean up the existing build dir, in case pkg_pretend
				# checks for available space (bug #390711).
				if existing_builddir:
					if x.built:
						tree = "bintree"
						infloc = os.path.join(build_dir_path, "build-info")
						ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
					else:
						tree = "porttree"
						portdb = root_config.trees["porttree"].dbapi
						ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
						if ebuild_path is None:
							raise AssertionError(
								"ebuild not found for '%s'" % x.cpv)
					portage.package.ebuild.doebuild.doebuild_environment(
						ebuild_path, "clean", settings=settings,
						db=self.trees[settings['EROOT']][tree].dbapi)
					clean_phase = EbuildPhase(background=False,
						phase='clean', scheduler=sched_iface, settings=settings)
					current_task = clean_phase
					clean_phase.start()
					clean_phase.wait()
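
				# For a built (binary) package, fetch and verify the tbz2
				# first, so that pkg_pretend runs against the ebuild and
				# metadata bundled in the binary package's build-info.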
				if x.built:
					tree = "bintree"
					bintree = root_config.trees["bintree"].dbapi.bintree
					fetched = False

					# Display fetch on stdout, so that it's always clear what
					# is consuming time here.
					if bintree.isremote(x.cpv):
						fetcher = BinpkgFetcher(pkg=x,
							scheduler=sched_iface)
						fetcher.start()
						if fetcher.wait() != os.EX_OK:
							failures += 1
							continue
						fetched = fetcher.pkg_path

					if fetched is False:
						filename = bintree.getname(x.cpv)
					else:
						filename = fetched
					verifier = BinpkgVerifier(pkg=x,
						scheduler=sched_iface, _pkg_path=filename)
					current_task = verifier
					verifier.start()
					if verifier.wait() != os.EX_OK:
						failures += 1
						continue

					if fetched:
						bintree.inject(x.cpv, filename=fetched)
					tbz2_file = bintree.getname(x.cpv)
					infloc = os.path.join(build_dir_path, "build-info")
					ensure_dirs(infloc)
					portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
					ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
					settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
					settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

				else:
					tree = "porttree"
					portdb = root_config.trees["porttree"].dbapi
					ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
					if ebuild_path is None:
						raise AssertionError("ebuild not found for '%s'" % x.cpv)
					settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
					if self._build_opts.buildpkgonly:
						settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
					else:
						settings.configdict["pkg"]["MERGE_TYPE"] = "source"

				portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
					"pretend", settings=settings,
					db=self.trees[settings['EROOT']][tree].dbapi)

				prepare_build_dirs(root_config.root, settings, cleanup=0)

				vardb = root_config.trees['vartree'].dbapi
				settings["REPLACING_VERSIONS"] = " ".join(
					set(portage.versions.cpv_getversion(match) \
						for match in vardb.match(x.slot_atom) + \
						vardb.match('='+x.cpv)))
				pretend_phase = EbuildPhase(
					phase="pretend", scheduler=sched_iface,
					settings=settings)

				current_task = pretend_phase
				pretend_phase.start()
				ret = pretend_phase.wait()
				if ret != os.EX_OK:
					failures += 1
				portage.elog.elog_process(x.cpv, settings)
			finally:

				if current_task is not None:
					if current_task.isAlive():
						current_task.cancel()
						current_task.wait()
					if current_task.returncode == os.EX_OK:
						clean_phase = EbuildPhase(background=False,
							phase='clean', scheduler=sched_iface,
							settings=settings)
						clean_phase.start()
						clean_phase.wait()

				sched_iface.run_until_complete(build_dir.async_unlock())

		if failures:
			return FAILURE
		return os.EX_OK

	def merge(self):

		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		try:
			self._background = self._background_mode()
		except self._unknown_internal_error:
			return FAILURE

		rval = self._handle_self_update()
		if rval != os.EX_OK:
			return rval

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = (
					'The directory specified in your PORTAGE_TMPDIR variable does not exist:',
					tmpdir,
					'Please create this directory or correct your PORTAGE_TMPDIR setting.',
				)
				out = portage.output.EOutput()
				for l in msg:
					out.eerror(l)
				return FAILURE

			if self._background:
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		rval = self._generate_digests()
		if rval != os.EX_OK:
			return rval

		# TODO: Immediately recalculate deps here if --keep-going
		#       is enabled and corrupt manifests are detected.
		rval = self._check_manifests()
		if rval != os.EX_OK and not keep_going:
			return rval

		if not fetchonly:
			rval = self._run_pkg_pretend()
			if rval != os.EX_OK:
				return rval

		while True:

			received_signal = []

			def sighandler(signum, frame):
				signal.signal(signal.SIGINT, signal.SIG_IGN)
				signal.signal(signal.SIGTERM, signal.SIG_IGN)
				portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
					{"signal":signum})
				self.terminate()
				received_signal.append(128 + signum)

			earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
			earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
			earlier_sigcont_handler = \
				signal.signal(signal.SIGCONT, self._sigcont_handler)
			signal.siginterrupt(signal.SIGCONT, False)

			try:
				rval = self._merge()
			finally:
				# Restore previous handlers
				if earlier_sigint_handler is not None:
					signal.signal(signal.SIGINT, earlier_sigint_handler)
				else:
					signal.signal(signal.SIGINT, signal.SIG_DFL)
				if earlier_sigterm_handler is not None:
					signal.signal(signal.SIGTERM, earlier_sigterm_handler)
				else:
					signal.signal(signal.SIGTERM, signal.SIG_DFL)
				if earlier_sigcont_handler is not None:
					signal.signal(signal.SIGCONT, earlier_sigcont_handler)
				else:
					signal.signal(signal.SIGCONT, signal.SIG_DFL)

			self._termination_check()
			if received_signal:
				sys.exit(received_signal[0])

			if rval == os.EX_OK or fetchonly or not keep_going:
				break
			if "resume" not in mtimedb:
				break
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not mergelist:
				break

			if not failed_pkgs:
				break

			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))

			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

			if not mergelist:
				break

			if not self._calc_resume_list():
				break

			clear_caches(self.trees)
			if not self._mergelist:
				break

			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval
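
		# Each pass of the --keep-going loop above removes the packages
		# that failed from the resume mergelist and recalculates the
		# dependency graph via _calc_resume_list() before retrying.
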
		# Cleanup any callbacks that have been registered with the global
		# event loop by calls to the terminate method.
		self._cleanup()

		self._logger.log(" *** Finished. Cleaning up...")

		if failed_pkgs:
			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

		printer = portage.output.EOutput()
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1 and \
			self.myopts.get('--quiet-fail', 'n') != 'y':

			# If only one package failed then just show its
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			log_file = None
			log_file_real = None

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				try:
					log_file = open(_unicode_encode(log_path,
						encoding=_encodings['fs'], errors='strict'), mode='rb')
				except IOError:
					pass
				else:
					if log_path.endswith('.gz'):
						log_file_real = log_file
						log_file = gzip.GzipFile(filename='',
							mode='rb', fileobj=log_file)

			if log_file is not None:
				try:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				except zlib.error as e:
					writemsg_level("%s\n" % (e,), level=logging.ERROR,
						noiselevel=-1)
				finally:
					log_file.close()
					if log_file_real is not None:
						log_file_real.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				root_msg = ""
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				print()
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				print()
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						continue
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				msg()

		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and keep_going):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build, install, or execute postinst:"
			else:
				msg = "The following package has " + \
					"failed to build, install, or execute postinst:"

			printer.eerror("")
			for line in textwrap.wrap(msg, 72):
				printer.eerror(line)
			printer.eerror("")
			for failed_pkg in self._failed_pkgs_all:
				# Use unicode_literals to force unicode format string so
				# that Package.__unicode__() is called in python2.
				msg = " %s" % (failed_pkg.pkg,)
				if failed_pkg.postinst_failure:
					msg += " (postinst failed)"
				log_path = self._locate_failure_log(failed_pkg)
				if log_path is not None:
					msg += ", Log file:"
				printer.eerror(msg)
				if log_path is not None:
					printer.eerror("  '%s'" % colorize('INFORM', log_path))
			printer.eerror("")
		if self._failed_pkgs_all:
			return FAILURE
		return os.EX_OK

	def _elog_listener(self, mysettings, key, logentries, fulltext):
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		if errors:
			self._failed_pkgs_die_msgs.append(
				(mysettings, key, errors))

	def _locate_failure_log(self, failed_pkg):

		log_paths = [failed_pkg.build_log]

		for log_path in log_paths:
			if not log_path:
				continue

			try:
				log_size = os.stat(log_path).st_size
			except OSError:
				continue

			if log_size == 0:
				continue

			return log_path

		return None

	def _add_packages(self):
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
				pass

	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		In general, this keeps track of installed system packages with
		unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
		a fragile situation, so we don't execute any unrelated builds until
		the circular dependencies are built and installed.
		"""
		graph = self._digraph
		if graph is None:
			return
		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root_config.settings["ROOT"] != "/":
			return

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):
				return False
			return True

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
				continue
			if child is pkg:
				continue
			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)

	def _merge_wait_exit_handler(self, task):
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)

	def _merge_exit(self, merge):
		self._running_tasks.pop(id(merge), None)
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		self._schedule()

	def _do_merge_exit(self, merge):
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=pkg,
				returncode=merge.returncode))
			if not self._terminated_tasks:
				self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
				self._status_display.failed = len(self._failed_pkgs)
			return
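
		# A postinst failure is non-fatal: the package is already merged,
		# so it is recorded in _failed_pkgs_all for the final summary
		# rather than in _failed_pkgs, which would stop further scheduling
		# (see _keep_scheduling()).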
		if merge.postinst_failure:
			# Append directly to _failed_pkgs_all for non-critical errors.
			self._failed_pkgs_all.append(self._failed_pkg(
				build_dir=merge.merge.settings.get("PORTAGE_BUILDDIR"),
				build_log=merge.merge.settings.get("PORTAGE_LOG_FILE"),
				pkg=pkg,
				postinst_failure=True,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs_all[-1],
				"execute postinst for", "for")

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			if self._digraph is not None and \
				pkg_to_replace in self._digraph:
				try:
					self._pkg_queue.remove(pkg_to_replace)
				except ValueError:
					pass
				self._task_complete(pkg_to_replace)
			else:
				self._pkg_cache.pop(pkg_to_replace, None)

		if pkg.installed:
			return

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()

	def _build_exit(self, build):
		self._running_tasks.pop(id(build), None)
		if build.returncode == os.EX_OK and self._terminated_tasks:
			# We've been interrupted, so we won't
			# add this to the merge queue.
			self.curval += 1
			self._deallocate_config(build.settings)
		elif build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			self._running_tasks[id(merge)] = merge
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=build.pkg,
				returncode=build.returncode))
			if not self._terminated_tasks:
				self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
				self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()

	def _extract_exit(self, build):
		self._build_exit(build)

	def _task_complete(self, pkg):
		self._completed_tasks.add(pkg)
		self._unsatisfied_system_deps.discard(pkg)
		self._choose_pkg_return_early = False
		blocker_db = self._blocker_db[pkg.root]
		blocker_db.discardBlocker(pkg)
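
	# The main loop simply runs the event loop until the _main_exit future
	# is resolved; _schedule_tasks() sets its result once no work remains
	# and nothing is left to schedule.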
	def _main_loop(self):
		self._main_exit = self._event_loop.create_future()

		if self._max_load is not None and \
			self._loadavg_latency is not None and \
			(self._max_jobs is True or self._max_jobs > 1):
			# We have to schedule periodically, in case the load
			# average has changed since the last call.
			self._main_loadavg_handle = self._event_loop.call_later(
				self._loadavg_latency, self._schedule)

		self._schedule()
		self._event_loop.run_until_complete(self._main_exit)

	def _merge(self):

		if self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		self._add_prefetchers()
		self._add_packages()
		failed_pkgs = self._failed_pkgs
		portage.locks._quiet = self._background
		portage.elog.add_listener(self._elog_listener)

		def display_callback():
			self._status_display.display()
			display_callback.handle = self._event_loop.call_later(
				self._max_display_latency, display_callback)
		display_callback.handle = None

		if self._status_display._isatty and not self._status_display.quiet:
			display_callback()

		rval = os.EX_OK

		try:
			self._main_loop()
		finally:
			self._main_loop_cleanup()
			portage.locks._quiet = False
			portage.elog.remove_listener(self._elog_listener)
			if display_callback.handle is not None:
				display_callback.handle.cancel()
			if failed_pkgs:
				rval = failed_pkgs[-1].returncode

		return rval

	def _main_loop_cleanup(self):
		del self._pkg_queue[:]
		self._completed_tasks.clear()
		self._deep_system_deps.clear()
		self._unsatisfied_system_deps.clear()
		self._choose_pkg_return_early = False
		self._status_display.reset()
		self._digraph = None
		self._task_queues.fetch.clear()
		self._prefetchers.clear()
		self._main_exit = None

		if self._main_loadavg_handle is not None:
			self._main_loadavg_handle.cancel()
			self._main_loadavg_handle = None

		if self._job_delay_timeout_id is not None:
			self._job_delay_timeout_id.cancel()
			self._job_delay_timeout_id = None

	def _choose_pkg(self):
		"""
		Choose a task that has all its dependencies satisfied. This is used
		for parallel build scheduling, and ensures that we don't build
		anything with deep dependencies that have yet to be merged.
		"""

		if self._choose_pkg_return_early:
			return None

		if self._digraph is None:
			if self._is_work_scheduled() and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
				return None
			return self._pkg_queue.pop(0)

		if not self._is_work_scheduled():
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		chosen_pkg = None

		# Prefer uninstall operations when available.
		graph = self._digraph
		for pkg in self._pkg_queue:
			if pkg.operation == 'uninstall' and \
				not graph.child_nodes(pkg):
				chosen_pkg = pkg
				break

		if chosen_pkg is None:
			later = set(self._pkg_queue)
			for pkg in self._pkg_queue:
				later.remove(pkg)
				if not self._dependent_on_scheduled_merges(pkg, later):
					chosen_pkg = pkg
					break

		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)

		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# completes.
			self._choose_pkg_return_early = True

		return chosen_pkg
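
	# Illustrative example: given a queue [A, B] where A runtime-depends
	# on a package whose merge is still scheduled, _choose_pkg() above
	# passes over A and returns B, provided that B's dependency subgraph
	# (checked below) contains no scheduled merges.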
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given package's deep dependencies
		to see if it contains any scheduled merges.

		@param pkg: a package to check dependencies for
		@type pkg: Package
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@type later: set
		@rtype: bool
		@return: True if the package is dependent, False otherwise.
		"""

		graph = self._digraph
		completed_tasks = self._completed_tasks

		dependent = False
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
		while node_stack:
			node = node_stack.pop()
			if node in traversed_nodes:
				continue
			traversed_nodes.add(node)
			if not ((node.installed and node.operation == "nomerge") or \
				(node.operation == "uninstall" and \
				node not in direct_deps) or \
				node in completed_tasks or \
				node in later):
				dependent = True
				break

			# Don't traverse children of uninstall nodes since
			# those aren't dependencies in the usual sense.
			if node.operation != "uninstall":
				node_stack.extend(graph.child_nodes(node))

		return dependent

	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		if self._config_pool[root]:
			temp_settings = self._config_pool[root].pop()
		else:
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings

	def _deallocate_config(self, settings):
		self._config_pool[settings['EROOT']].append(settings)

	def _keep_scheduling(self):
		return bool(not self._terminated.is_set() and self._pkg_queue and \
			not (self._failed_pkgs and not self._build_opts.fetchonly))

	def _is_work_scheduled(self):
		return bool(self._running_tasks)

	def _running_job_count(self):
		return self._jobs
	def _schedule_tasks(self):
		while True:
			state_change = 0

			# When the number of jobs and merges drops to zero,
			# process a single merge from _merge_wait_queue if
			# it's not empty. We only process one since these are
			# special packages and we want to ensure that
			# parallel-install does not cause more than one of
			# them to install at the same time.
			if (self._merge_wait_queue and not self._jobs and
				not self._task_queues.merge):
				task = self._merge_wait_queue.popleft()
				task.addExitListener(self._merge_wait_exit_handler)
				self._merge_wait_scheduled.append(task)
				self._task_queues.merge.add(task)
				self._status_display.merges = len(self._task_queues.merge)
				state_change += 1

			if self._schedule_tasks_imp():
				state_change += 1

			self._status_display.display()

			# Cancel prefetchers if they're the only reason
			# the main poll loop is still running.
			if self._failed_pkgs and not self._build_opts.fetchonly and \
				not self._is_work_scheduled() and \
				self._task_queues.fetch:
				# Since this happens asynchronously, it doesn't count in
				# state_change (counting it triggers an infinite loop).
				self._task_queues.fetch.clear()

			if not (state_change or \
				(self._merge_wait_queue and not self._jobs and
				not self._task_queues.merge)):
				break

		if not (self._is_work_scheduled() or
			self._keep_scheduling() or self._main_exit.done()):
			self._main_exit.set_result(None)
		elif self._main_loadavg_handle is not None:
			self._main_loadavg_handle.cancel()
			self._main_loadavg_handle = self._event_loop.call_later(
				self._loadavg_latency, self._schedule)

	def _sigcont_handler(self, signum, frame):
		self._sigcont_time = time.time()

	def _job_delay(self):
		"""
		@rtype: bool
		@return: True if job scheduling should be delayed, False otherwise.
		"""

		if self._jobs and self._max_load is not None:
			current_time = time.time()

			if self._sigcont_time is not None:

				elapsed_seconds = current_time - self._sigcont_time
				# elapsed_seconds < 0 means the system clock has been adjusted
				if elapsed_seconds > 0 and \
					elapsed_seconds < self._sigcont_delay:

					if self._job_delay_timeout_id is not None:
						self._job_delay_timeout_id.cancel()

					self._job_delay_timeout_id = self._event_loop.call_later(
						self._sigcont_delay - elapsed_seconds,
						self._schedule)
					return True

				# Only set this to None after the delay has expired,
				# since this method may be called again before the
				# delay has expired.
				self._sigcont_time = None

			try:
				avg1, avg5, avg15 = getloadavg()
			except OSError:
				return False

			delay = self._job_delay_max * avg1 / self._max_load
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			elapsed_seconds = current_time - self._previous_job_start_time
			# elapsed_seconds < 0 means the system clock has been adjusted
			if elapsed_seconds > 0 and elapsed_seconds < delay:

				if self._job_delay_timeout_id is not None:
					self._job_delay_timeout_id.cancel()

				self._job_delay_timeout_id = self._event_loop.call_later(
					delay - elapsed_seconds, self._schedule)
				return True

		return False
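
	# A worked example of the delay formula above: with --load-average=4
	# and a one-minute load average of 2.0, delay = 5 * 2.0 / 4 = 2.5
	# seconds; the delay is always capped at _job_delay_max (5 seconds).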
	def _schedule_tasks_imp(self):
		"""
		@rtype: bool
		@return: True if state changed, False otherwise.
		"""

		state_change = 0

		while True:

			if not self._keep_scheduling():
				return bool(state_change)

			if self._choose_pkg_return_early or \
				self._merge_wait_scheduled or \
				(self._jobs and self._unsatisfied_system_deps) or \
				not self._can_add_job() or \
				self._job_delay():
				return bool(state_change)

			pkg = self._choose_pkg()
			if pkg is None:
				return bool(state_change)

			state_change += 1

			if not pkg.installed:
				self._pkg_count.curval += 1

			task = self._task(pkg)

			if pkg.installed:
				merge = PackageMerge(merge=task)
				self._running_tasks[id(merge)] = merge
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.addFront(merge)

			elif pkg.built:
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				self._running_tasks[id(task)] = task
				task.addExitListener(self._extract_exit)
				self._task_queues.jobs.add(task)

			else:
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				self._running_tasks[id(task)] = task
				task.addExitListener(self._build_exit)
				self._task_queues.jobs.add(task)

		return bool(state_change)

	def _task(self, pkg):

		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
				if portage.cpv_getkey(x) == pkg.cp]
			if not previous_cpv and vardb.cpv_exists(pkg.cpv):
				# same cpv, different SLOT
				previous_cpv = [pkg.cpv]
			if previous_cpv:
				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True,
					operation="uninstall")

		try:
			prefetcher = self._prefetchers.pop(pkg, None)
		except KeyError:
			# KeyError observed with PyPy 1.8, despite None given as default.
			# Note that PyPy 1.8 has the same WeakValueDictionary code as
			# CPython 2.7, so it may be possible for CPython to raise KeyError
			# here as well.
			prefetcher = None
		if prefetcher is not None and not prefetcher.isAlive():
			try:
				self._task_queues.fetch._task_queue.remove(prefetcher)
			except ValueError:
				pass
			prefetcher = None

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=prefetcher,
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)

		return task

	def _failed_pkg_msg(self, failed_pkg, action, preposition):
		pkg = failed_pkg.pkg
		msg = "%s to %s %s" % \
			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
		if pkg.root_config.settings["ROOT"] != "/":
			msg += " %s %s" % (preposition, pkg.root)

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is not None:
			msg += ", Log file:"
		self._status_msg(msg)

		if log_path is not None:
			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))

	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
the user. This1522		delegates the resposibility of generating \r and \n control characters,1523		to guarantee that lines are created or erased when necessary and1524		appropriate.1525		@type msg: str1526		@param msg: a brief status message (no newlines allowed)1527		"""1528		if not self._background:1529			writemsg_level("\n")1530		self._status_display.displayMessage(msg)1531	def _save_resume_list(self):1532		"""1533		Do this before verifying the ebuild Manifests since it might1534		be possible for the user to use --resume --skipfirst get past1535		a non-essential package with a broken digest.1536		"""1537		mtimedb = self._mtimedb1538		mtimedb["resume"] = {}1539		# Stored as a dict starting with portage-2.1.6_rc1, and supported1540		# by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support1541		# a list type for options.1542		mtimedb["resume"]["myopts"] = self.myopts.copy()1543		# Convert Atom instances to plain str.1544		mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]1545		mtimedb["resume"]["mergelist"] = [list(x) \1546			for x in self._mergelist \1547			if isinstance(x, Package) and x.operation == "merge"]1548		mtimedb.commit()1549	def _calc_resume_list(self):1550		"""1551		Use the current resume list to calculate a new one,1552		dropping any packages with unsatisfied deps.1553		@rtype: bool1554		@return: True if successful, False otherwise.1555		"""1556		print(colorize("GOOD", "*** Resuming merge..."))1557		# free some memory before creating1558		# the resume depgraph1559		self._destroy_graph()1560		myparams = create_depgraph_params(self.myopts, None)1561		success = False1562		e = None1563		try:1564			success, mydepgraph, dropped_tasks = resume_depgraph(1565				self.settings, self.trees, self._mtimedb, self.myopts,1566				myparams, self._spinner)1567		except depgraph.UnsatisfiedResumeDep as exc:1568			# rename variable to avoid python-3.0 error:1569			# SyntaxError: can not delete variable 'e' referenced in nested1570			#              scope1571			e = exc1572			mydepgraph = e.depgraph1573			dropped_tasks = {}1574		if e is not None:1575			def unsatisfied_resume_dep_msg():1576				mydepgraph.display_problems()1577				out = portage.output.EOutput()1578				out.eerror("One or more packages are either masked or " + \1579					"have missing dependencies:")1580				out.eerror("")1581				indent = "  "1582				show_parents = set()1583				for dep in e.value:1584					if dep.parent in show_parents:1585						continue1586					show_parents.add(dep.parent)1587					if dep.atom is None:1588						out.eerror(indent + "Masked package:")1589						out.eerror(2 * indent + str(dep.parent))1590						out.eerror("")1591					else:1592						out.eerror(indent + str(dep.atom) + " pulled in by:")1593						out.eerror(2 * indent + str(dep.parent))1594						out.eerror("")1595				msg = "The resume list contains packages " + \1596					"that are either masked or have " + \1597					"unsatisfied dependencies. 
" + \1598					"Please restart/continue " + \1599					"the operation manually, or use --skipfirst " + \1600					"to skip the first package in the list and " + \1601					"any other packages that may be " + \1602					"masked or have missing dependencies."1603				for line in textwrap.wrap(msg, 72):1604					out.eerror(line)1605			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)1606			return False1607		if success and self._show_list():1608			mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)1609		if not success:1610			self._post_mod_echo_msgs.append(mydepgraph.display_problems)1611			return False1612		mydepgraph.display_problems()1613		self._init_graph(mydepgraph.schedulerGraph())1614		msg_width = 751615		for task, atoms in dropped_tasks.items():1616			if not (isinstance(task, Package) and task.operation == "merge"):1617				continue1618			pkg = task1619			msg = "emerge --keep-going:" + \1620				" %s" % (pkg.cpv,)1621			if pkg.root_config.settings["ROOT"] != "/":1622				msg += " for %s" % (pkg.root,)1623			if not atoms:1624				msg += " dropped because it is masked or unavailable"1625			else:1626				msg += " dropped because it requires %s" % ", ".join(atoms)1627			for line in textwrap.wrap(msg, msg_width):1628				eerror(line, phase="other", key=pkg.cpv)1629			settings = self.pkgsettings[pkg.root]1630			# Ensure that log collection from $T is disabled inside1631			# elog_process(), since any logs that might exist are1632			# not valid here.1633			settings.pop("T", None)1634			portage.elog.elog_process(pkg.cpv, settings)1635			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))1636		return True1637	def _show_list(self):1638		myopts = self.myopts1639		if "--quiet" not in myopts and \1640			("--ask" in myopts or "--tree" in myopts or \1641			"--verbose" in myopts):1642			return True1643		return False1644	def _world_atom(self, pkg):1645		"""1646		Add or remove the package to the world file, but only if1647		it's supposed to be added or removed. Otherwise, do nothing.1648		"""1649		if set(("--buildpkgonly", "--fetchonly",1650			"--fetch-all-uri",1651			"--oneshot", "--onlydeps",1652			"--pretend")).intersection(self.myopts):1653			return1654		if pkg.root != self.target_root:1655			return1656		args_set = self._args_set1657		if not args_set.findAtomForPackage(pkg):1658			return1659		logger = self._logger1660		pkg_count = self._pkg_count1661		root_config = pkg.root_config1662		world_set = root_config.sets["selected"]1663		world_locked = False1664		atom = None1665		if pkg.operation != "uninstall":1666			atom = self._world_atoms.get(pkg)1667		try:1668			if hasattr(world_set, "lock"):1669				world_set.lock()1670				world_locked = True1671			if hasattr(world_set, "load"):1672				world_set.load() # maybe it's changed on disk1673			if pkg.operation == "uninstall":1674				if hasattr(world_set, "cleanPackage"):1675					world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,1676							pkg.cpv)1677				if hasattr(world_set, "remove"):1678					for s in pkg.root_config.setconfig.active:1679						world_set.remove(SETPREFIX+s)1680			else:1681				if atom is not None:1682					if hasattr(world_set, "add"):1683						self._status_msg(('Recording %s in "world" ' + \1684							'favorites file...') % atom)1685						logger.log(" === (%s of %s) Updating world file (%s)" % \1686							(pkg_count.curval, pkg_count.maxval, pkg.cpv))1687						world_set.add(atom)1688					else:1689						writemsg_level('\n!!! 
Unable to record %s in "world"\n' % \1690							(atom,), level=logging.WARN, noiselevel=-1)1691		finally:1692			if world_locked:1693				world_set.unlock()1694	def _pkg(self, cpv, type_name, root_config, installed=False,1695		operation=None, myrepo=None):1696		"""1697		Get a package instance from the cache, or create a new1698		one if necessary. Raises KeyError from aux_get if it1699		failures for some reason (package does not exist or is1700		corrupt).1701		"""1702		# Reuse existing instance when available.1703		pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,1704			type_name=type_name, repo_name=myrepo, root_config=root_config,1705			installed=installed, operation=operation))1706		if pkg is not None:1707			return pkg1708		tree_type = depgraph.pkg_tree_map[type_name]1709		db = root_config.trees[tree_type].dbapi1710		db_keys = list(self.trees[root_config.root][1711			tree_type].dbapi._aux_cache_keys)1712		metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))1713		pkg = Package(built=(type_name != "ebuild"),1714			cpv=cpv, installed=installed, metadata=metadata,1715			root_config=root_config, type_name=type_name)1716		self._pkg_cache[pkg] = pkg...__init__.py
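The heart of _job_delay() above is a simple throttle: the gap enforced between job starts grows linearly with the one-minute load average and is capped at _job_delay_max. Here is a minimal standalone sketch of that calculation; the function and parameter names (compute_job_delay, max_load, job_delay_max) are illustrative, not part of portage's API:

import os
import time

def compute_job_delay(previous_job_start_time, max_load, job_delay_max=5.0):
    """Seconds to wait before starting the next job, or 0.0 to start now.

    Mirrors the idea in Scheduler._job_delay(): scale the delay with the
    1-minute load average and cap it at job_delay_max.
    """
    avg1, _avg5, _avg15 = os.getloadavg()  # may raise OSError where unsupported
    delay = min(job_delay_max, job_delay_max * avg1 / max_load)
    elapsed = time.time() - previous_job_start_time
    # A negative elapsed time would mean the system clock was set backwards.
    if 0 < elapsed < delay:
        return delay - elapsed
    return 0.0

# Example: with max_load=4.0 and a 1-minute load average of 2.0, the target
# gap between job starts is 5.0 * 2.0 / 4.0 = 2.5 seconds.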
__init__.py
Source:__init__.py
...
                   copyfrom_path and _cleanup_path(copyfrom_path),
                   copyfrom_rev)
  return entry

def _fetch_log(svnrepos, full_name, which_rev, options, pool):
  revs = []
  if options.get('svn_latest_log', 0):
    rev = _log_helper(svnrepos, which_rev, full_name, pool)
    if rev:
      revs.append(rev)
  else:
    history_set = _get_history(svnrepos, full_name, which_rev, options)
    history_revs = history_set.keys()
    history_revs.sort()
    history_revs.reverse()
    subpool = core.svn_pool_create(pool)
    for history_rev in history_revs:
      core.svn_pool_clear(subpool)
      rev = _log_helper(svnrepos, history_rev, history_set[history_rev],
                        subpool)
      if rev:
        revs.append(rev)
    core.svn_pool_destroy(subpool)
  return revs

def _get_last_history_rev(fsroot, path, pool):
  history = fs.node_history(fsroot, path, pool)
  history = fs.history_prev(history, 0, pool)
  history_path, history_rev = fs.history_location(history, pool)
  return history_rev

def get_logs(svnrepos, full_name, rev, files):
  fsroot = svnrepos._getroot(rev)
  subpool = core.svn_pool_create(svnrepos.pool)
  for file in files:
    core.svn_pool_clear(subpool)
    path = _fs_path_join(full_name, file.name)
    rev = _get_last_history_rev(fsroot, path, subpool)
    datestr, author, msg = _fs_rev_props(svnrepos.fs_ptr, rev, subpool)
    date = _datestr_to_date(datestr, subpool)
    file.rev = str(rev)
    file.date = date
    file.author = author
    file.log = msg
    if file.kind == vclib.FILE:
      file.size = fs.file_length(fsroot, path, subpool)
  core.svn_pool_destroy(subpool)

def get_youngest_revision(svnrepos):
  return svnrepos.youngest

def temp_checkout(svnrepos, path, rev, pool):
  """Check out file revision to temporary file"""
  temp = tempfile.mktemp()
  fp = open(temp, 'wb')
  try:
    root = svnrepos._getroot(rev)
    stream = fs.file_contents(root, path, pool)
    try:
      while 1:
        chunk = core.svn_stream_read(stream, core.SVN_STREAM_CHUNK_SIZE)
        if not chunk:
          break
        fp.write(chunk)
    finally:
      core.svn_stream_close(stream)
  finally:
    fp.close()
  return temp

class FileContentsPipe:
  def __init__(self, root, path, pool):
    self._pool = core.svn_pool_create(pool)
    self._stream = fs.file_contents(root, path, self._pool)
    self._eof = 0

  def __del__(self):
    core.svn_pool_destroy(self._pool)

  def read(self, len=None):
    chunk = None
    if not self._eof:
      if len is None:
        buffer = cStringIO.StringIO()
        try:
          while 1:
            hunk = core.svn_stream_read(self._stream, 8192)
            if not hunk:
              break
            buffer.write(hunk)
          chunk = buffer.getvalue()
        finally:
          buffer.close()
      else:
        chunk = core.svn_stream_read(self._stream, len)
    if not chunk:
      self._eof = 1
    return chunk

  def readline(self):
    chunk = None
    if not self._eof:
      chunk, self._eof = core.svn_stream_readline(self._stream, '\n',
                                                  self._pool)
      if not self._eof:
        chunk = chunk + '\n'
    if not chunk:
      self._eof = 1
    return chunk

  def readlines(self):
    lines = []
    while True:
      line = self.readline()
      if not line:
        break
      lines.append(line)
    return lines

  def close(self):
    return core.svn_stream_close(self._stream)

  def eof(self):
    return self._eof

_re_blameinfo = re.compile(r"\s*(\d+)\s*(.*)")

class BlameSource:
  def __init__(self, svn_client_path, rootpath, fs_path, rev, first_rev):
    self.idx = -1
    self.line_number = 1
    self.last = None
    self.first_rev = first_rev
    rootpath = os.path.abspath(rootpath)
    url = 'file://' + string.join([rootpath, fs_path], "/")
    fp = popen.popen(svn_client_path,
                     ('blame', "-r%d" % int(rev), "%s@%d" % (url, int(rev))),
                     'rb', 1)
    self.fp = fp

  def __getitem__(self, idx):
    if idx == self.idx:
      return self.last
    if idx != self.idx + 1:
      raise BlameSequencingError()
    line = self.fp.readline()
    if not line:
      raise IndexError("No more annotations")
    m = _re_blameinfo.match(line[:17])
    if not m:
      raise vclib.Error("Could not parse blame output at line %i\n%s"
                        % (idx+1, line))
    rev, author = m.groups()
    text = line[18:]
    rev = int(rev)
    prev_rev = None
    if rev > self.first_rev:
      prev_rev = rev - 1
    item = _item(text=text, line_number=idx+1, rev=rev,
                 prev_rev=prev_rev, author=author, date=None)
    self.last = item
    self.idx = idx
    return item

class BlameSequencingError(Exception):
  pass

class SubversionRepository(vclib.Repository):
  def __init__(self, name, rootpath, svn_path):
    if not os.path.isdir(rootpath):
      raise vclib.ReposNotFound(name)

    # Initialize some stuff.
    self.pool = None
    self.apr_init = 0
    self.rootpath = rootpath
    self.name = name
    self.svn_client_path = os.path.normpath(os.path.join(svn_path, 'svn'))

    # Register a handler for SIGTERM so we can have a chance to
    # cleanup.  If ViewVC takes too long to start generating CGI
    # output, Apache will grow impatient and SIGTERM it.  While we
    # don't mind getting told to bail, we want to gracefully close the
    # repository before we bail.
    def _sigterm_handler(signum, frame, self=self):
      self._close()
      sys.exit(-1)
    try:
      signal.signal(signal.SIGTERM, _sigterm_handler)
    except ValueError:
      # This is probably "ValueError: signal only works in main
      # thread", which will get thrown by the likes of mod_python
      # when trying to install a signal handler from a thread that
      # isn't the main one.  We'll just not care.
      pass

    # Initialize APR and get our top-level pool.
    core.apr_initialize()
    self.apr_init = 1
    self.pool = core.svn_pool_create(None)
    self.scratch_pool = core.svn_pool_create(self.pool)

    # Open the repository and init some other variables.
    self.repos = repos.svn_repos_open(rootpath, self.pool)
    self.fs_ptr = repos.svn_repos_fs(self.repos)
    self.youngest = fs.youngest_rev(self.fs_ptr, self.pool)
    self._fsroots = {}

  def __del__(self):
    self._close()

  def _close(self):
    if self.pool:
      core.svn_pool_destroy(self.pool)
      self.pool = None
    if self.apr_init:
      core.apr_terminate()
      self.apr_init = 0

  def _scratch_clear(self):
    core.svn_pool_clear(self.scratch_pool)

  def itemtype(self, path_parts, rev):
    rev = self._getrev(rev)
    basepath = self._getpath(path_parts)
    kind = fs.check_path(self._getroot(rev), basepath, self.scratch_pool)
    self._scratch_clear()
    if kind == core.svn_node_dir:
      return vclib.DIR
    if kind == core.svn_node_file:
      return vclib.FILE
    raise vclib.ItemNotFound(path_parts)

  def openfile(self, path_parts, rev):
    path = self._getpath(path_parts)
    rev = self._getrev(rev)
    fsroot = self._getroot(rev)
    revision = str(_get_last_history_rev(fsroot, path, self.scratch_pool))
    self._scratch_clear()
    fp = FileContentsPipe(fsroot, path, self.pool)
    return fp, revision

  def listdir(self, path_parts, rev, options):
    basepath = self._getpath(path_parts)
    if self.itemtype(path_parts, rev) != vclib.DIR:
      raise vclib.Error("Path '%s' is not a directory." % basepath)
    rev = self._getrev(rev)
    fsroot = self._getroot(rev)
    dirents = fs.dir_entries(fsroot, basepath, self.scratch_pool)
    entries = []
    for entry in dirents.values():
      if entry.kind == core.svn_node_dir:
        kind = vclib.DIR
      elif entry.kind == core.svn_node_file:
        kind = vclib.FILE
      entries.append(vclib.DirEntry(entry.name, kind))
    self._scratch_clear()
    return entries

  def dirlogs(self, path_parts, rev, entries, options):
    get_logs(self, self._getpath(path_parts), self._getrev(rev), entries)

  def itemlog(self, path_parts, rev, options):
    """see vclib.Repository.itemlog docstring
    Option values recognized by this implementation
      svn_show_all_dir_logs
        boolean, default false. if set for a directory path, will include
        revisions where files underneath the directory have changed
      svn_cross_copies
        boolean, default false. if set for a path created by a copy, will
        include revisions from before the copy
      svn_latest_log
        boolean, default false. if set will return only newest single log
        entry
    """
    path = self._getpath(path_parts)
    rev = self._getrev(rev)
    revs = _fetch_log(self, path, rev, options, self.scratch_pool)
    self._scratch_clear()
    revs.sort()
    prev = None
    for rev in revs:
      rev.prev = prev
      prev = rev
    return revs

  def annotate(self, path_parts, rev):
    path = self._getpath(path_parts)
    rev = self._getrev(rev)
    fsroot = self._getroot(rev)
    history_set = _get_history(self, path, rev, {'svn_cross_copies': 1})
    history_revs = history_set.keys()
...
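Both temp_checkout() and FileContentsPipe.read() above use the same loop shape: read fixed-size chunks until an empty chunk signals end-of-stream. The same pattern in self-contained form, a sketch using an ordinary file object as a stand-in for the Subversion stream (file names are made up):

CHUNK_SIZE = 8192  # stand-in for core.SVN_STREAM_CHUNK_SIZE

def copy_in_chunks(src, dst, chunk_size=CHUNK_SIZE):
    """Copy src to dst chunk by chunk; an empty read means EOF."""
    while True:
        chunk = src.read(chunk_size)
        if not chunk:
            break
        dst.write(chunk)

with open("input.bin", "rb") as src, open("copy.bin", "wb") as dst:
    copy_in_chunks(src, dst)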
utils.py
Source:utils.py
...
                ['git', 'log', '-1', '--format=%h'],
                cwd=self.repo_path,
                stdout=subprocess.PIPE).communicate()[0].strip('\n')
        return self._version

    def _fetch_log(self):
        if not self._last_msg:
            self._last_msg = subprocess.Popen(
                ['git', 'log', '-1', '--format=%s'],
                cwd=self.repo_path,
                stdout=subprocess.PIPE).communicate()[0].strip('\n')
        return self._last_msg

    @property
    def last_msg(self):
        if not self._last_msg:
            self._fetch_log()
        return self._last_msg

class DirObjectLoader(object):
    '''
    Mixin for creating and loading objects that correspond to directories
    on the filesystem.
    The directory to load objects from should contain a set of
    subdirectories with numerical names.
    '''
    def load_from_dir(self, directory, class_, *args):
        objects = OrderedDict()
        ids = [id for id in os.listdir(directory) if
            os.path.isdir(join(directory, id))]
        for id in sorted(ids, key=int):
            path = join(directory, id)
...
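The _fetch_log() helper above shells out to git log -1 and strips the trailing newline from the output. The same pattern written for Python 3 with subprocess.run; the helper name, repository path, and format codes are just example inputs, not part of the snippet's project:

import subprocess

def git_log_field(repo_path, fmt):
    """Return one field of the most recent commit, e.g. '%h' (abbreviated
    hash) or '%s' (subject line)."""
    result = subprocess.run(
        ["git", "log", "-1", "--format=" + fmt],
        cwd=repo_path, capture_output=True, text=True, check=True)
    return result.stdout.strip("\n")

# version = git_log_field(".", "%h")
# last_msg = git_log_field(".", "%s")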
_fetch.py
Source:_fetch.py
...
        )

    @property
    def _fetch_uid(self) -> str:
        return self._initializer["fetchUid"]

    async def _fetch_log(self) -> List[str]:
        return await self._request._channel.send(
            "fetchLog",
            {
                "fetchUid": self._fetch_uid,
            },
        )

def is_json_content_type(headers: network.HeadersArray = None) -> bool:
    if not headers:
        return False
    for header in headers:
        if header["name"] == "Content-Type":
            return header["value"].startswith("application/json")
...
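is_json_content_type() expects Playwright's HeadersArray shape: a list of {"name": ..., "value": ...} dicts. A quick sketch of how the check behaves, with the function inlined so the example is self-contained and an explicit False fallback standing in for the truncated tail (the header values are made up):

def is_json_content_type(headers=None):
    # Same logic as the snippet above, plus an explicit fallback when
    # no Content-Type header is present at all.
    if not headers:
        return False
    for header in headers:
        if header["name"] == "Content-Type":
            return header["value"].startswith("application/json")
    return False

headers = [
    {"name": "Content-Length", "value": "42"},
    {"name": "Content-Type", "value": "application/json; charset=utf-8"},
]
print(is_json_content_type(headers))  # True
print(is_json_content_type([]))       # False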
LambdaTest’s Playwright tutorial gives a broader overview of the Playwright automation framework, its unique features, and its use cases, with examples to deepen your understanding of Playwright testing. The tutorial offers A-to-Z guidance, from installing the Playwright framework through best practices and advanced concepts.
