# Distributed under the terms of the GNU General Public License v2
# $Id$
-import array
-import codecs
-from collections import deque
-import fcntl
import formatter
import logging
import pwd
import select
import shlex
-import shutil
import signal
import sys
import textwrap
-import urlparse
import weakref
import gc
import os, stat
import _emerge.help
import portage.xpak, commands, errno, re, socket, time
-from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
+from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
nc_len, red, teal, turquoise, xtermTitle, \
xtermTitleReset, yellow
from portage.output import create_color_func
from itertools import chain, izip
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
+from _emerge.SlotObject import SlotObject
+from _emerge.DepPriority import DepPriority
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Task import Task
+from _emerge.Blocker import Blocker
+from _emerge.PollConstants import PollConstants
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.Binpkg import Binpkg
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.PackageMerge import PackageMerge
+from _emerge.DependencyArg import DependencyArg
+from _emerge.AtomArg import AtomArg
+from _emerge.PackageArg import PackageArg
+from _emerge.SetArg import SetArg
+from _emerge.Dependency import Dependency
+from _emerge.BlockerCache import BlockerCache
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RepoDisplay import RepoDisplay
+from _emerge.UseFlagDisplay import UseFlagDisplay
+from _emerge.PollSelectAdapter import PollSelectAdapter
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+from _emerge.ProgressHandler import ProgressHandler
try:
	from cStringIO import StringIO
except ImportError:
	from StringIO import StringIO
-class SlotObject(object):
- __slots__ = ("__weakref__",)
-
- def __init__(self, **kwargs):
- classes = [self.__class__]
- while classes:
- c = classes.pop()
- if c is SlotObject:
- continue
- classes.extend(c.__bases__)
- slots = getattr(c, "__slots__", None)
- if not slots:
- continue
- for myattr in slots:
- myvalue = kwargs.get(myattr, None)
- setattr(self, myattr, myvalue)
-
- def copy(self):
- """
- Create a new instance and copy all attributes
- defined from __slots__ (including those from
- inherited classes).
- """
- obj = self.__class__()
-
- classes = [self.__class__]
- while classes:
- c = classes.pop()
- if c is SlotObject:
- continue
- classes.extend(c.__bases__)
- slots = getattr(c, "__slots__", None)
- if not slots:
- continue
- for myattr in slots:
- setattr(obj, myattr, getattr(self, myattr))
-
- return obj
-
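-# Editor's sketch, not part of the original file: the hypothetical _Point
-# class below shows the SlotObject contract. Every name declared in
-# __slots__ anywhere in the hierarchy becomes an optional keyword argument
-# to __init__, defaulting to None, and copy() duplicates all of them.
-def _demo_slot_object():
-	class _Point(SlotObject):
-		__slots__ = ("x", "y")
-	p = _Point(x=1)
-	assert p.x == 1 and p.y is None
-	q = p.copy()
-	assert (q.x, q.y) == (1, None)
-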
-class AbstractDepPriority(SlotObject):
- __slots__ = ("buildtime", "runtime", "runtime_post")
-
- def __lt__(self, other):
- return self.__int__() < other
-
- def __le__(self, other):
- return self.__int__() <= other
-
- def __eq__(self, other):
- return self.__int__() == other
-
- def __ne__(self, other):
- return self.__int__() != other
-
- def __gt__(self, other):
- return self.__int__() > other
-
- def __ge__(self, other):
- return self.__int__() >= other
-
- def copy(self):
- import copy
- return copy.copy(self)
-
-class DepPriority(AbstractDepPriority):
-
- __slots__ = ("satisfied", "optional", "rebuild")
-
- def __int__(self):
- """
- Note: These priorities are only used for measuring hardness
- in the circular dependency display via digraph.debug_print(),
- and nothing more. For actual merge order calculations, the
- measures defined by the DepPriorityNormalRange and
- DepPrioritySatisfiedRange classes are used.
-
- Attributes Hardness
-
- buildtime 0
- runtime -1
- runtime_post -2
- optional -3
- (none of the above) -4
-
- """
-
- if self.buildtime:
- return 0
- if self.runtime:
- return -1
- if self.runtime_post:
- return -2
- if self.optional:
- return -3
- return -4
-
- def __str__(self):
- if self.optional:
- return "optional"
- if self.buildtime:
- return "buildtime"
- if self.runtime:
- return "runtime"
- if self.runtime_post:
- return "runtime_post"
- return "soft"
-
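-# Editor's sketch, not part of the original file: since AbstractDepPriority
-# implements its rich comparisons in terms of __int__, DepPriority instances
-# compare directly against plain integers and against each other according
-# to the hardness table above.
-def _demo_dep_priority():
-	hard = DepPriority(buildtime=True)
-	soft = DepPriority(runtime_post=True)
-	assert int(hard) == 0 and int(soft) == -2
-	assert soft < hard and hard >= 0
-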
-class BlockerDepPriority(DepPriority):
- __slots__ = ()
- def __int__(self):
- return 0
-
- def __str__(self):
- return 'blocker'
-
-BlockerDepPriority.instance = BlockerDepPriority()
-
-class UnmergeDepPriority(AbstractDepPriority):
- __slots__ = ("optional", "satisfied",)
- """
- Combination of properties Priority Category
-
- runtime 0 HARD
- runtime_post -1 HARD
- buildtime -2 SOFT
- (none of the above) -2 SOFT
- """
-
- MAX = 0
- SOFT = -2
- MIN = -2
-
- def __int__(self):
- if self.runtime:
- return 0
- if self.runtime_post:
- return -1
- if self.buildtime:
- return -2
- return -2
-
- def __str__(self):
- myvalue = self.__int__()
- if myvalue > self.SOFT:
- return "hard"
- return "soft"
-
-class DepPriorityNormalRange(object):
- """
- DepPriority properties Index Category
-
- buildtime HARD
- runtime 3 MEDIUM
- runtime_post 2 MEDIUM_SOFT
- optional 1 SOFT
- (none of the above) 0 NONE
- """
- MEDIUM = 3
- MEDIUM_SOFT = 2
- SOFT = 1
- NONE = 0
-
- @classmethod
- def _ignore_optional(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return bool(priority.optional)
-
- @classmethod
- def _ignore_runtime_post(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return bool(priority.optional or priority.runtime_post)
-
- @classmethod
- def _ignore_runtime(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return not priority.buildtime
-
- ignore_medium = _ignore_runtime
- ignore_medium_soft = _ignore_runtime_post
- ignore_soft = _ignore_optional
-
-DepPriorityNormalRange.ignore_priority = (
- None,
- DepPriorityNormalRange._ignore_optional,
- DepPriorityNormalRange._ignore_runtime_post,
- DepPriorityNormalRange._ignore_runtime
-)
-
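-# Editor's sketch, not part of the original file: merge-order calculations
-# walk ignore_priority from the weakest filter to the strongest, asking at
-# each level of relaxation whether a dependency edge may be ignored.
-def _demo_normal_range():
-	optional = DepPriority(optional=True)
-	runtime = DepPriority(runtime=True)
-	assert DepPriorityNormalRange.ignore_soft(optional)
-	assert not DepPriorityNormalRange.ignore_soft(runtime)
-	assert DepPriorityNormalRange.ignore_medium(runtime)
-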
-class DepPrioritySatisfiedRange(object):
- """
- DepPriority Index Category
-
- not satisfied and buildtime HARD
- not satisfied and runtime 7 MEDIUM
- not satisfied and runtime_post 6 MEDIUM_SOFT
- satisfied and buildtime and rebuild 5 SOFT
- satisfied and buildtime 4 SOFT
- satisfied and runtime 3 SOFT
- satisfied and runtime_post 2 SOFT
- optional 1 SOFT
- (none of the above) 0 NONE
- """
- MEDIUM = 7
- MEDIUM_SOFT = 6
- SOFT = 5
- NONE = 0
-
- @classmethod
- def _ignore_optional(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return bool(priority.optional)
-
- @classmethod
- def _ignore_satisfied_runtime_post(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- if priority.optional:
- return True
- if not priority.satisfied:
- return False
- return bool(priority.runtime_post)
-
- @classmethod
- def _ignore_satisfied_runtime(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- if priority.optional:
- return True
- if not priority.satisfied:
- return False
- return not priority.buildtime
-
- @classmethod
- def _ignore_satisfied_buildtime(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- if priority.optional:
- return True
- if not priority.satisfied:
- return False
- if priority.buildtime:
- return not priority.rebuild
- return True
-
- @classmethod
- def _ignore_satisfied_buildtime_rebuild(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- if priority.optional:
- return True
- return bool(priority.satisfied)
-
- @classmethod
- def _ignore_runtime_post(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return bool(priority.optional or \
- priority.satisfied or \
- priority.runtime_post)
-
- @classmethod
- def _ignore_runtime(cls, priority):
- if priority.__class__ is not DepPriority:
- return False
- return bool(priority.satisfied or \
- not priority.buildtime)
-
- ignore_medium = _ignore_runtime
- ignore_medium_soft = _ignore_runtime_post
- ignore_soft = _ignore_satisfied_buildtime_rebuild
-
-DepPrioritySatisfiedRange.ignore_priority = (
- None,
- DepPrioritySatisfiedRange._ignore_optional,
- DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
- DepPrioritySatisfiedRange._ignore_satisfied_runtime,
- DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
- DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
- DepPrioritySatisfiedRange._ignore_runtime_post,
- DepPrioritySatisfiedRange._ignore_runtime
-)
-
def _find_deep_system_runtime_deps(graph):
deep_system_deps = set()
node_stack = []
shown_licenses.add(l)
return have_eapi_mask
-class Task(SlotObject):
- __slots__ = ("_hash_key", "_hash_value")
-
- def _get_hash_key(self):
- hash_key = getattr(self, "_hash_key", None)
- if hash_key is None:
- raise NotImplementedError(self)
- return hash_key
-
- def __eq__(self, other):
- return self._get_hash_key() == other
-
- def __ne__(self, other):
- return self._get_hash_key() != other
-
- def __hash__(self):
- hash_value = getattr(self, "_hash_value", None)
- if hash_value is None:
- self._hash_value = hash(self._get_hash_key())
- return self._hash_value
-
- def __len__(self):
- return len(self._get_hash_key())
-
- def __getitem__(self, key):
- return self._get_hash_key()[key]
-
- def __iter__(self):
- return iter(self._get_hash_key())
-
- def __contains__(self, key):
- return key in self._get_hash_key()
-
- def __str__(self):
- return str(self._get_hash_key())
-
-class Blocker(Task):
-
- __hash__ = Task.__hash__
- __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
-
- def __init__(self, **kwargs):
- Task.__init__(self, **kwargs)
- self.cp = portage.dep_getkey(self.atom)
-
- def _get_hash_key(self):
- hash_key = getattr(self, "_hash_key", None)
- if hash_key is None:
- self._hash_key = \
- ("blocks", self.root, self.atom, self.eapi)
- return self._hash_key
-
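-# Editor's sketch, not part of the original file (the foo/bar atom is
-# hypothetical): a Task is interchangeable with the tuple returned by
-# _get_hash_key(), so it supports ==, len() and indexing against that tuple.
-def _demo_task_hash_key():
-	blocker = Blocker(root="/", atom=">=foo/bar-1", eapi="0")
-	assert blocker == ("blocks", "/", ">=foo/bar-1", "0")
-	assert len(blocker) == 4 and blocker[0] == "blocks"
-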
class Package(Task):
__hash__ = Task.__hash__
if other.cp != self.cp:
return False
if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
- return True
- return False
-
- def __ge__(self, other):
- if other.cp != self.cp:
- return False
- if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
- return True
- return False
-
-_all_metadata_keys = set(x for x in portage.auxdbkeys \
- if not x.startswith("UNUSED_"))
-_all_metadata_keys.discard("CDEPEND")
-_all_metadata_keys.update(Package.metadata_keys)
-
-from portage.cache.mappings import slot_dict_class
-_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
-
-class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
- """
- Detect metadata updates and synchronize Package attributes.
- """
-
- __slots__ = ("_pkg",)
- _wrapped_keys = frozenset(
- ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
-
- def __init__(self, pkg, metadata):
- _PackageMetadataWrapperBase.__init__(self)
- self._pkg = pkg
- self.update(metadata)
-
- def __setitem__(self, k, v):
- _PackageMetadataWrapperBase.__setitem__(self, k, v)
- if k in self._wrapped_keys:
- getattr(self, "_set_" + k.lower())(k, v)
-
- def _set_inherited(self, k, v):
- if isinstance(v, basestring):
- v = frozenset(v.split())
- self._pkg.inherited = v
-
- def _set_iuse(self, k, v):
- self._pkg.iuse = self._pkg._iuse(
- v.split(), self._pkg.root_config.iuse_implicit)
-
- def _set_slot(self, k, v):
- self._pkg.slot = v
-
- def _set_use(self, k, v):
- self._pkg.use = self._pkg._use(v.split())
-
- def _set_counter(self, k, v):
- if isinstance(v, basestring):
- try:
- v = long(v.strip())
- except ValueError:
- v = 0
- self._pkg.counter = v
-
- def _set__mtime_(self, k, v):
- if isinstance(v, basestring):
- try:
- v = long(v.strip())
- except ValueError:
- v = 0
- self._pkg.mtime = v
-
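-# Editor's sketch, not part of the original file: assigning to a wrapped key
-# through the metadata mapping immediately synchronizes the corresponding
-# attribute on the owning package (a stand-in object is used here).
-def _demo_metadata_wrapper():
-	class _FakePkg(object):
-		slot = None
-	pkg = _FakePkg()
-	metadata = _PackageMetadataWrapper(pkg, {})
-	metadata["SLOT"] = "0"
-	assert pkg.slot == "0"
-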
-class EbuildFetchonly(SlotObject):
-
- __slots__ = ("fetch_all", "pkg", "pretend", "settings")
-
- def execute(self):
- settings = self.settings
- pkg = self.pkg
- portdb = pkg.root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(pkg.cpv)
- settings.setcpv(pkg)
- debug = settings.get("PORTAGE_DEBUG") == "1"
- restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
-
- if restrict_fetch:
- rval = self._execute_with_builddir()
- else:
- rval = portage.doebuild(ebuild_path, "fetch",
- settings["ROOT"], settings, debug=debug,
- listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
- mydbapi=portdb, tree="porttree")
-
- if rval != os.EX_OK:
- msg = "Fetch failed for '%s'" % (pkg.cpv,)
- eerror(msg, phase="unpack", key=pkg.cpv)
-
- return rval
-
- def _execute_with_builddir(self):
-		# Spawning pkg_nofetch requires PORTAGE_BUILDDIR, both to
-		# ensure a sane $PWD (bug #239560) and to store elog
-		# messages. Use a private temp directory, in order
-		# to avoid locking the main one.
- settings = self.settings
- global_tmpdir = settings["PORTAGE_TMPDIR"]
- from tempfile import mkdtemp
- try:
- private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
- except OSError, e:
- if e.errno != portage.exception.PermissionDenied.errno:
- raise
- raise portage.exception.PermissionDenied(global_tmpdir)
- settings["PORTAGE_TMPDIR"] = private_tmpdir
- settings.backup_changes("PORTAGE_TMPDIR")
- try:
- retval = self._execute()
- finally:
- settings["PORTAGE_TMPDIR"] = global_tmpdir
- settings.backup_changes("PORTAGE_TMPDIR")
- shutil.rmtree(private_tmpdir)
- return retval
-
- def _execute(self):
- settings = self.settings
- pkg = self.pkg
- root_config = pkg.root_config
- portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(pkg.cpv)
- debug = settings.get("PORTAGE_DEBUG") == "1"
- retval = portage.doebuild(ebuild_path, "fetch",
- self.settings["ROOT"], self.settings, debug=debug,
- listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
- mydbapi=portdb, tree="porttree")
-
- if retval != os.EX_OK:
- msg = "Fetch failed for '%s'" % (pkg.cpv,)
- eerror(msg, phase="unpack", key=pkg.cpv)
-
- portage.elog.elog_process(self.pkg.cpv, self.settings)
- return retval
-
-class PollConstants(object):
-
- """
- Provides POLL* constants that are equivalent to those from the
- select module, for use by PollSelectAdapter.
- """
-
- names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
- v = 1
- for k in names:
- locals()[k] = getattr(select, k, v)
- v *= 2
- del k, v
-
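-# Editor's sketch, not part of the original file: event masks compose with
-# bitwise operators whether the values come from the select module or from
-# the fallback powers of two assigned above.
-def _demo_poll_constants():
-	event = PollConstants.POLLIN | PollConstants.POLLHUP
-	assert event & PollConstants.POLLIN
-	assert not event & PollConstants.POLLERR
-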
-class AsynchronousTask(SlotObject):
- """
- Subclasses override _wait() and _poll() so that calls
- to public methods can be wrapped for implementing
- hooks such as exit listener notification.
-
-	Subclasses should call self.wait() to notify exit listeners after
- the task is complete and self.returncode has been set.
- """
-
- __slots__ = ("background", "cancelled", "returncode") + \
- ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
-
- def start(self):
- """
- Start an asynchronous task and then return as soon as possible.
- """
- self._start_hook()
- self._start()
-
- def _start(self):
- raise NotImplementedError(self)
-
- def isAlive(self):
- return self.returncode is None
-
- def poll(self):
- self._wait_hook()
- return self._poll()
-
- def _poll(self):
- return self.returncode
-
- def wait(self):
- if self.returncode is None:
- self._wait()
- self._wait_hook()
- return self.returncode
-
- def _wait(self):
- return self.returncode
-
- def cancel(self):
- self.cancelled = True
- self.wait()
-
- def addStartListener(self, f):
- """
- The function will be called with one argument, a reference to self.
- """
- if self._start_listeners is None:
- self._start_listeners = []
- self._start_listeners.append(f)
-
- def removeStartListener(self, f):
- if self._start_listeners is None:
- return
- self._start_listeners.remove(f)
-
- def _start_hook(self):
- if self._start_listeners is not None:
- start_listeners = self._start_listeners
- self._start_listeners = None
-
- for f in start_listeners:
- f(self)
-
- def addExitListener(self, f):
- """
- The function will be called with one argument, a reference to self.
- """
- if self._exit_listeners is None:
- self._exit_listeners = []
- self._exit_listeners.append(f)
-
- def removeExitListener(self, f):
- if self._exit_listeners is None:
- if self._exit_listener_stack is not None:
- self._exit_listener_stack.remove(f)
- return
- self._exit_listeners.remove(f)
-
- def _wait_hook(self):
- """
- Call this method after the task completes, just before returning
- the returncode from wait() or poll(). This hook is
- used to trigger exit listeners when the returncode first
- becomes available.
- """
- if self.returncode is not None and \
- self._exit_listeners is not None:
-
- # This prevents recursion, in case one of the
- # exit handlers triggers this method again by
- # calling wait(). Use a stack that gives
- # removeExitListener() an opportunity to consume
- # listeners from the stack, before they can get
- # called below. This is necessary because a call
- # to one exit listener may result in a call to
- # removeExitListener() for another listener on
- # the stack. That listener needs to be removed
- # from the stack since it would be inconsistent
-			# to call it after it has been passed into
- # removeExitListener().
- self._exit_listener_stack = self._exit_listeners
- self._exit_listeners = None
-
- self._exit_listener_stack.reverse()
- while self._exit_listener_stack:
- self._exit_listener_stack.pop()(self)
-
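-# Editor's sketch, not part of the original file: the smallest useful
-# subclass finishes synchronously inside _start(), and the wait() call
-# there fires any registered exit listeners exactly once.
-class _NoopTask(AsynchronousTask):
-	__slots__ = ()
-	def _start(self):
-		self.returncode = os.EX_OK
-		self.wait()
-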
-class AbstractPollTask(AsynchronousTask):
-
- __slots__ = ("scheduler",) + \
- ("_registered",)
-
- _bufsize = 4096
- _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
- _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
- _exceptional_events
-
- def _unregister(self):
- raise NotImplementedError(self)
-
- def _unregister_if_appropriate(self, event):
- if self._registered:
- if event & self._exceptional_events:
- self._unregister()
- self.cancel()
- elif event & PollConstants.POLLHUP:
- self._unregister()
- self.wait()
-
-class PipeReader(AbstractPollTask):
-
- """
- Reads output from one or more files and saves it in memory,
- for retrieval via the getvalue() method. This is driven by
- the scheduler's poll() loop, so it runs entirely within the
- current process.
- """
-
- __slots__ = ("input_files",) + \
- ("_read_data", "_reg_ids")
-
- def _start(self):
- self._reg_ids = set()
- self._read_data = []
- for k, f in self.input_files.iteritems():
- fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_ids.add(self.scheduler.register(f.fileno(),
- self._registered_events, self._output_handler))
- self._registered = True
-
- def isAlive(self):
- return self._registered
-
- def cancel(self):
- if self.returncode is None:
- self.returncode = 1
- self.cancelled = True
- self.wait()
-
- def _wait(self):
- if self.returncode is not None:
- return self.returncode
-
- if self._registered:
- self.scheduler.schedule(self._reg_ids)
- self._unregister()
-
- self.returncode = os.EX_OK
- return self.returncode
-
- def getvalue(self):
- """Retrieve the entire contents"""
- if sys.hexversion >= 0x3000000:
- return bytes().join(self._read_data)
- return "".join(self._read_data)
-
- def close(self):
- """Free the memory buffer."""
- self._read_data = None
-
- def _output_handler(self, fd, event):
-
- if event & PollConstants.POLLIN:
-
- for f in self.input_files.itervalues():
- if fd == f.fileno():
- break
-
- buf = array.array('B')
- try:
- buf.fromfile(f, self._bufsize)
- except EOFError:
- pass
-
- if buf:
- self._read_data.append(buf.tostring())
- else:
- self._unregister()
- self.wait()
-
- self._unregister_if_appropriate(event)
- return self._registered
-
- def _unregister(self):
- """
- Unregister from the scheduler and close open files.
- """
-
- self._registered = False
-
- if self._reg_ids is not None:
- for reg_id in self._reg_ids:
- self.scheduler.unregister(reg_id)
- self._reg_ids = None
-
- if self.input_files is not None:
- for f in self.input_files.itervalues():
- f.close()
- self.input_files = None
-
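-# Editor's sketch, not part of the original file (assumes a scheduler object
-# implementing the register()/unregister()/schedule() interface used above):
-#
-#	master_fd, slave_fd = os.pipe()
-#	reader = PipeReader(
-#		input_files={"pipe_read": os.fdopen(master_fd, 'rb')},
-#		scheduler=scheduler)
-#	reader.start()
-#	reader.wait()	# the scheduler drives _output_handler() until EOF
-#	data = reader.getvalue()
-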
-class CompositeTask(AsynchronousTask):
-
- __slots__ = ("scheduler",) + ("_current_task",)
-
- def isAlive(self):
- return self._current_task is not None
-
- def cancel(self):
- self.cancelled = True
- if self._current_task is not None:
- self._current_task.cancel()
-
- def _poll(self):
- """
- This does a loop calling self._current_task.poll()
- repeatedly as long as the value of self._current_task
- keeps changing. It calls poll() a maximum of one time
- for a given self._current_task instance. This is useful
-		since calling poll() on a task can trigger advancement
-		to the next task, which could eventually lead to the returncode
- being set in cases when polling only a single task would
- not have the same effect.
- """
-
- prev = None
- while True:
- task = self._current_task
- if task is None or task is prev:
- # don't poll the same task more than once
- break
- task.poll()
- prev = task
-
- return self.returncode
-
- def _wait(self):
-
- prev = None
- while True:
- task = self._current_task
- if task is None:
- # don't wait for the same task more than once
- break
- if task is prev:
- # Before the task.wait() method returned, an exit
- # listener should have set self._current_task to either
- # a different task or None. Something is wrong.
- raise AssertionError("self._current_task has not " + \
- "changed since calling wait", self, task)
- task.wait()
- prev = task
-
- return self.returncode
-
- def _assert_current(self, task):
- """
- Raises an AssertionError if the given task is not the
- same one as self._current_task. This can be useful
- for detecting bugs.
- """
- if task is not self._current_task:
- raise AssertionError("Unrecognized task: %s" % (task,))
-
- def _default_exit(self, task):
- """
- Calls _assert_current() on the given task and then sets the
- composite returncode attribute if task.returncode != os.EX_OK.
- If the task failed then self._current_task will be set to None.
- Subclasses can use this as a generic task exit callback.
-
- @rtype: int
- @returns: The task.returncode attribute.
- """
- self._assert_current(task)
- if task.returncode != os.EX_OK:
- self.returncode = task.returncode
- self._current_task = None
- return task.returncode
-
- def _final_exit(self, task):
- """
- Assumes that task is the final task of this composite task.
- Calls _default_exit() and sets self.returncode to the task's
- returncode and sets self._current_task to None.
- """
- self._default_exit(task)
- self._current_task = None
- self.returncode = task.returncode
- return self.returncode
-
- def _default_final_exit(self, task):
- """
- This calls _final_exit() and then wait().
-
- Subclasses can use this as a generic final task exit callback.
-
- """
- self._final_exit(task)
- return self.wait()
-
- def _start_task(self, task, exit_handler):
- """
- Register exit handler for the given task, set it
- as self._current_task, and call task.start().
-
- Subclasses can use this as a generic way to start
- a task.
-
- """
- task.addExitListener(exit_handler)
- self._current_task = task
- task.start()
-
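-# Editor's sketch, not part of the original file: subclasses typically chain
-# one task at a time, advancing from each task's exit callback. The "first"
-# and "second" task attributes here are hypothetical.
-class _TwoStepTask(CompositeTask):
-	__slots__ = ("first", "second")
-	def _start(self):
-		self._start_task(self.first, self._first_exit)
-	def _first_exit(self, first):
-		if self._default_exit(first) == os.EX_OK:
-			self._start_task(self.second, self._default_final_exit)
-		else:
-			self.wait()
-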
-class TaskSequence(CompositeTask):
- """
- A collection of tasks that executes sequentially. Each task
-	must have an addExitListener() method that can be used as
- a means to trigger movement from one task to the next.
- """
-
- __slots__ = ("_task_queue",)
-
- def __init__(self, **kwargs):
- AsynchronousTask.__init__(self, **kwargs)
- self._task_queue = deque()
-
- def add(self, task):
- self._task_queue.append(task)
-
- def _start(self):
- self._start_next_task()
-
- def cancel(self):
- self._task_queue.clear()
- CompositeTask.cancel(self)
-
- def _start_next_task(self):
- self._start_task(self._task_queue.popleft(),
- self._task_exit_handler)
-
- def _task_exit_handler(self, task):
- if self._default_exit(task) != os.EX_OK:
- self.wait()
- elif self._task_queue:
- self._start_next_task()
- else:
- self._final_exit(task)
- self.wait()
-
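-# Editor's sketch, not part of the original file: queued tasks run back to
-# back and the sequence stops at the first failure. The _NoopTask instances
-# defined above complete synchronously, so no real scheduler is consulted.
-def _demo_task_sequence():
-	seq = TaskSequence(scheduler=None)
-	seq.add(_NoopTask())
-	seq.add(_NoopTask())
-	seq.start()
-	assert seq.wait() == os.EX_OK
-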
-class SubProcess(AbstractPollTask):
-
- __slots__ = ("pid",) + \
- ("_files", "_reg_id")
-
- # A file descriptor is required for the scheduler to monitor changes from
- # inside a poll() loop. When logging is not enabled, create a pipe just to
- # serve this purpose alone.
- _dummy_pipe_fd = 9
-
- def _poll(self):
- if self.returncode is not None:
- return self.returncode
- if self.pid is None:
- return self.returncode
- if self._registered:
- return self.returncode
-
- try:
- retval = os.waitpid(self.pid, os.WNOHANG)
- except OSError, e:
- if e.errno != errno.ECHILD:
- raise
- del e
- retval = (self.pid, 1)
-
- if retval == (0, 0):
- return None
- self._set_returncode(retval)
- return self.returncode
-
- def cancel(self):
- if self.isAlive():
- try:
- os.kill(self.pid, signal.SIGTERM)
- except OSError, e:
- if e.errno != errno.ESRCH:
- raise
- del e
-
- self.cancelled = True
- if self.pid is not None:
- self.wait()
- return self.returncode
-
- def isAlive(self):
- return self.pid is not None and \
- self.returncode is None
-
- def _wait(self):
-
- if self.returncode is not None:
- return self.returncode
-
- if self._registered:
- self.scheduler.schedule(self._reg_id)
- self._unregister()
- if self.returncode is not None:
- return self.returncode
-
- try:
- wait_retval = os.waitpid(self.pid, 0)
- except OSError, e:
- if e.errno != errno.ECHILD:
- raise
- del e
- self._set_returncode((self.pid, 1))
- else:
- self._set_returncode(wait_retval)
-
- return self.returncode
-
- def _unregister(self):
- """
- Unregister from the scheduler and close open files.
- """
-
- self._registered = False
-
- if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
- self._reg_id = None
-
- if self._files is not None:
- for f in self._files.itervalues():
- f.close()
- self._files = None
-
- def _set_returncode(self, wait_retval):
-
- retval = wait_retval[1]
-
- if retval != os.EX_OK:
- if retval & 0xff:
- retval = (retval & 0xff) << 8
- else:
- retval = retval >> 8
-
- self.returncode = retval
-
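-# Editor's sketch, not part of the original file: os.waitpid() returns the
-# raw 16-bit status word (exit status in the high byte, terminating signal
-# in the low byte), and _set_returncode() folds it into a single integer.
-def _demo_set_returncode():
-	proc = SubProcess()
-	proc._set_returncode((0, 1 << 8))	# exited normally with status 1
-	assert proc.returncode == 1
-	proc._set_returncode((0, 9))		# killed by signal 9 (SIGKILL)
-	assert proc.returncode == 9 << 8
-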
-class SpawnProcess(SubProcess):
-
- """
- Constructor keyword args are passed into portage.process.spawn().
- The required "args" keyword argument will be passed as the first
- spawn() argument.
- """
-
- _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
- "uid", "gid", "groups", "umask", "logfile",
- "path_lookup", "pre_exec")
-
- __slots__ = ("args",) + \
- _spawn_kwarg_names
-
- _file_names = ("log", "process", "stdout")
- _files_dict = slot_dict_class(_file_names, prefix="")
-
- def _start(self):
-
- if self.cancelled:
- return
-
- if self.fd_pipes is None:
- self.fd_pipes = {}
- fd_pipes = self.fd_pipes
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
-
- # flush any pending output
- for fd in fd_pipes.itervalues():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
-
- logfile = self.logfile
- self._files = self._files_dict()
- files = self._files
-
- master_fd, slave_fd = self._pipe(fd_pipes)
- fcntl.fcntl(master_fd, fcntl.F_SETFL,
- fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- null_input = None
- fd_pipes_orig = fd_pipes.copy()
- if self.background:
- # TODO: Use job control functions like tcsetpgrp() to control
- # access to stdin. Until then, use /dev/null so that any
- # attempts to read from stdin will immediately return EOF
- # instead of blocking indefinitely.
- null_input = open('/dev/null', 'rb')
- fd_pipes[0] = null_input.fileno()
- else:
- fd_pipes[0] = fd_pipes_orig[0]
-
- files.process = os.fdopen(master_fd, 'rb')
- if logfile is not None:
-
- fd_pipes[1] = slave_fd
- fd_pipes[2] = slave_fd
-
- files.log = open(logfile, mode='ab')
- portage.util.apply_secpass_permissions(logfile,
- uid=portage.portage_uid, gid=portage.portage_gid,
- mode=0660)
-
- if not self.background:
- files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
-
- output_handler = self._output_handler
-
- else:
-
- # Create a dummy pipe so the scheduler can monitor
- # the process from inside a poll() loop.
- fd_pipes[self._dummy_pipe_fd] = slave_fd
- if self.background:
- fd_pipes[1] = slave_fd
- fd_pipes[2] = slave_fd
- output_handler = self._dummy_handler
-
- kwargs = {}
- for k in self._spawn_kwarg_names:
- v = getattr(self, k)
- if v is not None:
- kwargs[k] = v
-
- kwargs["fd_pipes"] = fd_pipes
- kwargs["returnpid"] = True
- kwargs.pop("logfile", None)
-
- self._reg_id = self.scheduler.register(files.process.fileno(),
- self._registered_events, output_handler)
- self._registered = True
-
- retval = self._spawn(self.args, **kwargs)
-
- os.close(slave_fd)
- if null_input is not None:
- null_input.close()
-
- if isinstance(retval, int):
- # spawn failed
- self._unregister()
- self.returncode = retval
- self.wait()
- return
-
- self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
-
- def _pipe(self, fd_pipes):
- """
- @type fd_pipes: dict
- @param fd_pipes: pipes from which to copy terminal size if desired.
- """
- return os.pipe()
-
- def _spawn(self, args, **kwargs):
- return portage.process.spawn(args, **kwargs)
-
- def _output_handler(self, fd, event):
-
- if event & PollConstants.POLLIN:
-
- files = self._files
- buf = array.array('B')
- try:
- buf.fromfile(files.process, self._bufsize)
- except EOFError:
- pass
-
- if buf:
- if not self.background:
- write_successful = False
- failures = 0
- while True:
- try:
- if not write_successful:
- buf.tofile(files.stdout)
- write_successful = True
- files.stdout.flush()
- break
- except IOError, e:
- if e.errno != errno.EAGAIN:
- raise
- del e
- failures += 1
- if failures > 50:
- # Avoid a potentially infinite loop. In
- # most cases, the failure count is zero
- # and it's unlikely to exceed 1.
- raise
-
- # This means that a subprocess has put an inherited
- # stdio file descriptor (typically stdin) into
- # O_NONBLOCK mode. This is not acceptable (see bug
- # #264435), so revert it. We need to use a loop
- # here since there's a race condition due to
- # parallel processes being able to change the
- # flags on the inherited file descriptor.
- # TODO: When possible, avoid having child processes
- # inherit stdio file descriptors from portage
- # (maybe it can't be avoided with
- # PROPERTIES=interactive).
- fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(files.stdout.fileno(),
- fcntl.F_GETFL) ^ os.O_NONBLOCK)
-
- buf.tofile(files.log)
- files.log.flush()
- else:
- self._unregister()
- self.wait()
-
- self._unregister_if_appropriate(event)
- return self._registered
-
- def _dummy_handler(self, fd, event):
- """
- This method is mainly interested in detecting EOF, since
- the only purpose of the pipe is to allow the scheduler to
- monitor the process from inside a poll() loop.
- """
-
- if event & PollConstants.POLLIN:
-
- buf = array.array('B')
- try:
- buf.fromfile(self._files.process, self._bufsize)
- except EOFError:
- pass
-
- if buf:
- pass
- else:
- self._unregister()
- self.wait()
-
- self._unregister_if_appropriate(event)
- return self._registered
-
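-# Editor's sketch, not part of the original file (the scheduler and log path
-# are assumptions): SpawnProcess runs an external command, optionally teeing
-# its output between the terminal and a log file.
-#
-#	proc = SpawnProcess(args=["du", "-h", "/usr/portage"],
-#		background=True, logfile="/var/log/du.log", scheduler=scheduler)
-#	proc.start()
-#	returncode = proc.wait()
-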
-class MiscFunctionsProcess(SpawnProcess):
- """
- Spawns misc-functions.sh with an existing ebuild environment.
- """
-
- __slots__ = ("commands", "phase", "pkg", "settings")
-
- def _start(self):
- settings = self.settings
- settings.pop("EBUILD_PHASE", None)
- portage_bin_path = settings["PORTAGE_BIN_PATH"]
- misc_sh_binary = os.path.join(portage_bin_path,
- os.path.basename(portage.const.MISC_SH_BINARY))
-
- self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
- self.logfile = settings.get("PORTAGE_LOG_FILE")
-
- portage._doebuild_exit_status_unlink(
- settings.get("EBUILD_EXIT_STATUS_FILE"))
-
- SpawnProcess._start(self)
-
- def _spawn(self, args, **kwargs):
- settings = self.settings
- debug = settings.get("PORTAGE_DEBUG") == "1"
- return portage.spawn(" ".join(args), settings,
- debug=debug, **kwargs)
-
- def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
- self.returncode = portage._doebuild_exit_status_check_and_log(
- self.settings, self.phase, self.returncode)
-
-class EbuildFetcher(SpawnProcess):
-
- __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
- ("_build_dir",)
-
- def _start(self):
-
- root_config = self.pkg.root_config
- portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(self.pkg.cpv)
- settings = self.config_pool.allocate()
- settings.setcpv(self.pkg)
-
- # In prefetch mode, logging goes to emerge-fetch.log and the builddir
- # should not be touched since otherwise it could interfere with
- # another instance of the same cpv concurrently being built for a
- # different $ROOT (currently, builds only cooperate with prefetchers
- # that are spawned for the same $ROOT).
- if not self.prefetch:
- self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
- self._build_dir.lock()
- self._build_dir.clean_log()
- portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
- if self.logfile is None:
- self.logfile = settings.get("PORTAGE_LOG_FILE")
-
- phase = "fetch"
- if self.fetchall:
- phase = "fetchall"
-
- # If any incremental variables have been overridden
- # via the environment, those values need to be passed
- # along here so that they are correctly considered by
-		# the config instance in the subprocess.
- fetch_env = os.environ.copy()
-
- nocolor = settings.get("NOCOLOR")
- if nocolor is not None:
- fetch_env["NOCOLOR"] = nocolor
-
- fetch_env["PORTAGE_NICENESS"] = "0"
- if self.prefetch:
- fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
-
- ebuild_binary = os.path.join(
- settings["PORTAGE_BIN_PATH"], "ebuild")
-
- fetch_args = [ebuild_binary, ebuild_path, phase]
- debug = settings.get("PORTAGE_DEBUG") == "1"
- if debug:
- fetch_args.append("--debug")
-
- self.args = fetch_args
- self.env = fetch_env
- SpawnProcess._start(self)
-
- def _pipe(self, fd_pipes):
-		"""When appropriate, use a pty so that fetcher progress bars,
-		such as wget's, work properly."""
- if self.background or not sys.stdout.isatty():
- # When the output only goes to a log file,
- # there's no point in creating a pty.
- return os.pipe()
- stdout_pipe = fd_pipes.get(1)
- got_pty, master_fd, slave_fd = \
- portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
- return (master_fd, slave_fd)
-
- def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
- # Collect elog messages that might have been
- # created by the pkg_nofetch phase.
- if self._build_dir is not None:
- # Skip elog messages for prefetch, in order to avoid duplicates.
- if not self.prefetch and self.returncode != os.EX_OK:
- elog_out = None
- if self.logfile is not None:
- if self.background:
- elog_out = open(self.logfile, 'a')
- msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
- if self.logfile is not None:
- msg += ", Log file:"
- eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
- if self.logfile is not None:
- eerror(" '%s'" % (self.logfile,),
- phase="unpack", key=self.pkg.cpv, out=elog_out)
- if elog_out is not None:
- elog_out.close()
- if not self.prefetch:
- portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
- features = self._build_dir.settings.features
- if self.returncode == os.EX_OK:
- self._build_dir.clean_log()
- self._build_dir.unlock()
- self.config_pool.deallocate(self._build_dir.settings)
- self._build_dir = None
-
-class EbuildBuildDir(SlotObject):
-
- __slots__ = ("dir_path", "pkg", "settings",
- "locked", "_catdir", "_lock_obj")
-
- def __init__(self, **kwargs):
- SlotObject.__init__(self, **kwargs)
- self.locked = False
-
- def lock(self):
- """
- This raises an AlreadyLocked exception if lock() is called
- while a lock is already held. In order to avoid this, call
- unlock() or check whether the "locked" attribute is True
- or False before calling lock().
- """
- if self._lock_obj is not None:
- raise self.AlreadyLocked((self._lock_obj,))
-
- dir_path = self.dir_path
- if dir_path is None:
- root_config = self.pkg.root_config
- portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(self.pkg.cpv)
- settings = self.settings
- settings.setcpv(self.pkg)
- debug = settings.get("PORTAGE_DEBUG") == "1"
- use_cache = 1 # always true
- portage.doebuild_environment(ebuild_path, "setup", root_config.root,
- self.settings, debug, use_cache, portdb)
- dir_path = self.settings["PORTAGE_BUILDDIR"]
-
- catdir = os.path.dirname(dir_path)
- self._catdir = catdir
-
- portage.util.ensure_dirs(os.path.dirname(catdir),
- gid=portage.portage_gid,
- mode=070, mask=0)
- catdir_lock = None
- try:
- catdir_lock = portage.locks.lockdir(catdir)
- portage.util.ensure_dirs(catdir,
- gid=portage.portage_gid,
- mode=070, mask=0)
- self._lock_obj = portage.locks.lockdir(dir_path)
- finally:
- self.locked = self._lock_obj is not None
- if catdir_lock is not None:
- portage.locks.unlockdir(catdir_lock)
-
- def clean_log(self):
- """Discard existing log."""
- settings = self.settings
-
- for x in ('.logid', 'temp/build.log'):
- try:
- os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
- except OSError:
- pass
-
- def unlock(self):
- if self._lock_obj is None:
- return
-
- portage.locks.unlockdir(self._lock_obj)
- self._lock_obj = None
- self.locked = False
-
- catdir = self._catdir
- catdir_lock = None
- try:
- catdir_lock = portage.locks.lockdir(catdir)
- finally:
- if catdir_lock:
- try:
- os.rmdir(catdir)
- except OSError, e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST):
- raise
- del e
- portage.locks.unlockdir(catdir_lock)
-
- class AlreadyLocked(portage.exception.PortageException):
- pass
-
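-# Editor's sketch, not part of the original file: the expected locking
-# discipline brackets the build with lock()/unlock(), since a second lock()
-# call on the same instance raises AlreadyLocked.
-#
-#	build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
-#	build_dir.lock()
-#	try:
-#		pass	# run ebuild phases here
-#	finally:
-#		build_dir.unlock()
-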
-class EbuildBuild(CompositeTask):
-
- __slots__ = ("args_set", "config_pool", "find_blockers",
- "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
- "prefetcher", "settings", "world_atom") + \
- ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
-
- def _start(self):
-
- logger = self.logger
- opts = self.opts
- pkg = self.pkg
- settings = self.settings
- world_atom = self.world_atom
- root_config = pkg.root_config
- tree = "porttree"
- self._tree = tree
- portdb = root_config.trees[tree].dbapi
- settings.setcpv(pkg)
- settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
- ebuild_path = portdb.findname(self.pkg.cpv)
- self._ebuild_path = ebuild_path
-
- prefetcher = self.prefetcher
- if prefetcher is None:
- pass
- elif not prefetcher.isAlive():
- prefetcher.cancel()
- elif prefetcher.poll() is None:
-
- waiting_msg = "Fetching files " + \
- "in the background. " + \
- "To view fetch progress, run `tail -f " + \
- "/var/log/emerge-fetch.log` in another " + \
- "terminal."
- msg_prefix = colorize("GOOD", " * ")
- from textwrap import wrap
- waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
- for line in wrap(waiting_msg, 65))
- if not self.background:
- writemsg(waiting_msg, noiselevel=-1)
-
- self._current_task = prefetcher
- prefetcher.addExitListener(self._prefetch_exit)
- return
-
- self._prefetch_exit(prefetcher)
-
- def _prefetch_exit(self, prefetcher):
-
- opts = self.opts
- pkg = self.pkg
- settings = self.settings
-
- if opts.fetchonly:
- fetcher = EbuildFetchonly(
- fetch_all=opts.fetch_all_uri,
- pkg=pkg, pretend=opts.pretend,
- settings=settings)
- retval = fetcher.execute()
- self.returncode = retval
- self.wait()
- return
-
- fetcher = EbuildFetcher(config_pool=self.config_pool,
- fetchall=opts.fetch_all_uri,
- fetchonly=opts.fetchonly,
- background=self.background,
- pkg=pkg, scheduler=self.scheduler)
-
- self._start_task(fetcher, self._fetch_exit)
-
- def _fetch_exit(self, fetcher):
- opts = self.opts
- pkg = self.pkg
-
- fetch_failed = False
- if opts.fetchonly:
- fetch_failed = self._final_exit(fetcher) != os.EX_OK
- else:
- fetch_failed = self._default_exit(fetcher) != os.EX_OK
-
- if fetch_failed and fetcher.logfile is not None and \
- os.path.exists(fetcher.logfile):
- self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
-
- if not fetch_failed and fetcher.logfile is not None:
- # Fetch was successful, so remove the fetch log.
- try:
- os.unlink(fetcher.logfile)
- except OSError:
- pass
-
- if fetch_failed or opts.fetchonly:
- self.wait()
- return
-
- logger = self.logger
- opts = self.opts
- pkg_count = self.pkg_count
- scheduler = self.scheduler
- settings = self.settings
- features = settings.features
- ebuild_path = self._ebuild_path
- system_set = pkg.root_config.sets["system"]
-
- self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
- self._build_dir.lock()
-
- # Cleaning is triggered before the setup
- # phase, in portage.doebuild().
- msg = " === (%s of %s) Cleaning (%s::%s)" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
- short_msg = "emerge: (%s of %s) %s Clean" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log(msg, short_msg=short_msg)
-
- #buildsyspkg: Check if we need to _force_ binary package creation
- self._issyspkg = "buildsyspkg" in features and \
- system_set.findAtomForPackage(pkg) and \
- not opts.buildpkg
-
- if opts.buildpkg or self._issyspkg:
-
- self._buildpkg = True
-
- msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
- short_msg = "emerge: (%s of %s) %s Compile" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log(msg, short_msg=short_msg)
-
- else:
- msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
- short_msg = "emerge: (%s of %s) %s Compile" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log(msg, short_msg=short_msg)
-
- build = EbuildExecuter(background=self.background, pkg=pkg,
- scheduler=scheduler, settings=settings)
- self._start_task(build, self._build_exit)
-
- def _unlock_builddir(self):
- portage.elog.elog_process(self.pkg.cpv, self.settings)
- self._build_dir.unlock()
-
- def _build_exit(self, build):
- if self._default_exit(build) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- opts = self.opts
- buildpkg = self._buildpkg
-
- if not buildpkg:
- self._final_exit(build)
- self.wait()
- return
-
- if self._issyspkg:
- msg = ">>> This is a system package, " + \
- "let's pack a rescue tarball.\n"
-
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- if log_path is not None:
- log_file = open(log_path, 'a')
- try:
- log_file.write(msg)
- finally:
- log_file.close()
-
- if not self.background:
- portage.writemsg_stdout(msg, noiselevel=-1)
-
- packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
- scheduler=self.scheduler, settings=self.settings)
-
- self._start_task(packager, self._buildpkg_exit)
-
- def _buildpkg_exit(self, packager):
- """
-		Releases the build dir lock when there is a failure or
- when in buildpkgonly mode. Otherwise, the lock will
- be released when merge() is called.
- """
-
- if self._default_exit(packager) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- if self.opts.buildpkgonly:
- # Need to call "clean" phase for buildpkgonly mode
- portage.elog.elog_process(self.pkg.cpv, self.settings)
- phase = "clean"
- clean_phase = EbuildPhase(background=self.background,
- pkg=self.pkg, phase=phase,
- scheduler=self.scheduler, settings=self.settings,
- tree=self._tree)
- self._start_task(clean_phase, self._clean_exit)
- return
-
- # Continue holding the builddir lock until
- # after the package has been installed.
- self._current_task = None
- self.returncode = packager.returncode
- self.wait()
-
- def _clean_exit(self, clean_phase):
- if self._final_exit(clean_phase) != os.EX_OK or \
- self.opts.buildpkgonly:
- self._unlock_builddir()
- self.wait()
-
- def install(self):
- """
- Install the package and then clean up and release locks.
- Only call this after the build has completed successfully
- and neither fetchonly nor buildpkgonly mode are enabled.
- """
-
- find_blockers = self.find_blockers
- ldpath_mtimes = self.ldpath_mtimes
- logger = self.logger
- pkg = self.pkg
- pkg_count = self.pkg_count
- settings = self.settings
- world_atom = self.world_atom
- ebuild_path = self._ebuild_path
- tree = self._tree
-
- merge = EbuildMerge(find_blockers=self.find_blockers,
- ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
- pkg_count=pkg_count, pkg_path=ebuild_path,
- scheduler=self.scheduler,
- settings=settings, tree=tree, world_atom=world_atom)
-
- msg = " === (%s of %s) Merging (%s::%s)" % \
- (pkg_count.curval, pkg_count.maxval,
- pkg.cpv, ebuild_path)
- short_msg = "emerge: (%s of %s) %s Merge" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log(msg, short_msg=short_msg)
-
- try:
- rval = merge.execute()
- finally:
- self._unlock_builddir()
-
- return rval
-
-class EbuildExecuter(CompositeTask):
-
- __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
-
- _phases = ("prepare", "configure", "compile", "test", "install")
-
- _live_eclasses = frozenset([
- "bzr",
- "cvs",
- "darcs",
- "git",
- "mercurial",
- "subversion"
- ])
-
- def _start(self):
- self._tree = "porttree"
- pkg = self.pkg
- phase = "clean"
- clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
- scheduler=self.scheduler, settings=self.settings, tree=self._tree)
- self._start_task(clean_phase, self._clean_phase_exit)
-
- def _clean_phase_exit(self, clean_phase):
-
- if self._default_exit(clean_phase) != os.EX_OK:
- self.wait()
- return
-
- pkg = self.pkg
- scheduler = self.scheduler
- settings = self.settings
- cleanup = 1
-
- # This initializes PORTAGE_LOG_FILE.
- portage.prepare_build_dirs(pkg.root, settings, cleanup)
-
- setup_phase = EbuildPhase(background=self.background,
- pkg=pkg, phase="setup", scheduler=scheduler,
- settings=settings, tree=self._tree)
-
- setup_phase.addExitListener(self._setup_exit)
- self._current_task = setup_phase
- self.scheduler.scheduleSetup(setup_phase)
-
- def _setup_exit(self, setup_phase):
-
- if self._default_exit(setup_phase) != os.EX_OK:
- self.wait()
- return
-
- unpack_phase = EbuildPhase(background=self.background,
- pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
- settings=self.settings, tree=self._tree)
-
- if self._live_eclasses.intersection(self.pkg.inherited):
- # Serialize $DISTDIR access for live ebuilds since
-			# otherwise they can interfere with each other.
-
- unpack_phase.addExitListener(self._unpack_exit)
- self._current_task = unpack_phase
- self.scheduler.scheduleUnpack(unpack_phase)
-
- else:
- self._start_task(unpack_phase, self._unpack_exit)
-
- def _unpack_exit(self, unpack_phase):
-
- if self._default_exit(unpack_phase) != os.EX_OK:
- self.wait()
- return
-
- ebuild_phases = TaskSequence(scheduler=self.scheduler)
-
- pkg = self.pkg
- phases = self._phases
- eapi = pkg.metadata["EAPI"]
- if eapi in ("0", "1"):
- # skip src_prepare and src_configure
- phases = phases[2:]
-
- for phase in phases:
- ebuild_phases.add(EbuildPhase(background=self.background,
- pkg=self.pkg, phase=phase, scheduler=self.scheduler,
- settings=self.settings, tree=self._tree))
-
- self._start_task(ebuild_phases, self._default_final_exit)
-
-class EbuildMetadataPhase(SubProcess):
-
- """
- Asynchronous interface for the ebuild "depend" phase which is
- used to extract metadata from the ebuild.
- """
-
- __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
- "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
- ("_raw_metadata",)
-
- _file_names = ("ebuild",)
- _files_dict = slot_dict_class(_file_names, prefix="")
- _metadata_fd = 9
-
- def _start(self):
- settings = self.settings
- settings.setcpv(self.cpv)
- ebuild_path = self.ebuild_path
-
- eapi = None
- if 'parse-eapi-glep-55' in settings.features:
- pf, eapi = portage._split_ebuild_name_glep55(
- os.path.basename(ebuild_path))
- if eapi is None and \
- 'parse-eapi-ebuild-head' in settings.features:
- eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
- mode='r', encoding='utf_8', errors='replace'))
-
- if eapi is not None:
- if not portage.eapi_is_supported(eapi):
- self.metadata_callback(self.cpv, self.ebuild_path,
- self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
- self.returncode = os.EX_OK
- self.wait()
- return
-
- settings.configdict['pkg']['EAPI'] = eapi
-
- debug = settings.get("PORTAGE_DEBUG") == "1"
- master_fd = None
- slave_fd = None
- fd_pipes = None
- if self.fd_pipes is not None:
- fd_pipes = self.fd_pipes.copy()
- else:
- fd_pipes = {}
-
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
-
- # flush any pending output
- for fd in fd_pipes.itervalues():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
-
- fd_pipes_orig = fd_pipes.copy()
- self._files = self._files_dict()
- files = self._files
-
- master_fd, slave_fd = os.pipe()
- fcntl.fcntl(master_fd, fcntl.F_SETFL,
- fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- fd_pipes[self._metadata_fd] = slave_fd
-
- self._raw_metadata = []
- files.ebuild = os.fdopen(master_fd, 'r')
- self._reg_id = self.scheduler.register(files.ebuild.fileno(),
- self._registered_events, self._output_handler)
- self._registered = True
-
- retval = portage.doebuild(ebuild_path, "depend",
- settings["ROOT"], settings, debug,
- mydbapi=self.portdb, tree="porttree",
- fd_pipes=fd_pipes, returnpid=True)
-
- os.close(slave_fd)
-
- if isinstance(retval, int):
- # doebuild failed before spawning
- self._unregister()
- self.returncode = retval
- self.wait()
- return
-
- self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
-
- def _output_handler(self, fd, event):
-
- if event & PollConstants.POLLIN:
- self._raw_metadata.append(self._files.ebuild.read())
- if not self._raw_metadata[-1]:
- self._unregister()
- self.wait()
-
- self._unregister_if_appropriate(event)
- return self._registered
-
- def _set_returncode(self, wait_retval):
- SubProcess._set_returncode(self, wait_retval)
- if self.returncode == os.EX_OK:
- metadata_lines = "".join(self._raw_metadata).splitlines()
- if len(portage.auxdbkeys) != len(metadata_lines):
- # Don't trust bash's returncode if the
- # number of lines is incorrect.
- self.returncode = 1
- else:
- metadata = izip(portage.auxdbkeys, metadata_lines)
- self.metadata = self.metadata_callback(self.cpv,
- self.ebuild_path, self.repo_path, metadata,
- self.ebuild_mtime)
-
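-# Editor's sketch, not part of the original file: the "depend" phase emits
-# one line per key in portage.auxdbkeys, in order, so the pairing done in
-# _set_returncode() above is a simple zip of keys with lines.
-def _demo_metadata_pairing(raw_metadata):
-	metadata_lines = "".join(raw_metadata).splitlines()
-	if len(portage.auxdbkeys) != len(metadata_lines):
-		return None
-	return dict(izip(portage.auxdbkeys, metadata_lines))
-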
-class EbuildProcess(SpawnProcess):
-
- __slots__ = ("phase", "pkg", "settings", "tree")
-
- def _start(self):
- # Don't open the log file during the clean phase since the
- # open file can result in an nfs lock on $T/build.log which
- # prevents the clean phase from removing $T.
- if self.phase not in ("clean", "cleanrm"):
- self.logfile = self.settings.get("PORTAGE_LOG_FILE")
- SpawnProcess._start(self)
-
- def _pipe(self, fd_pipes):
- stdout_pipe = fd_pipes.get(1)
- got_pty, master_fd, slave_fd = \
- portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
- return (master_fd, slave_fd)
-
- def _spawn(self, args, **kwargs):
-
- root_config = self.pkg.root_config
- tree = self.tree
- mydbapi = root_config.trees[tree].dbapi
- settings = self.settings
- ebuild_path = settings["EBUILD"]
- debug = settings.get("PORTAGE_DEBUG") == "1"
-
- rval = portage.doebuild(ebuild_path, self.phase,
- root_config.root, settings, debug,
- mydbapi=mydbapi, tree=tree, **kwargs)
-
- return rval
-
- def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
-
- if self.phase not in ("clean", "cleanrm"):
- self.returncode = portage._doebuild_exit_status_check_and_log(
- self.settings, self.phase, self.returncode)
-
- if self.phase == "test" and self.returncode != os.EX_OK and \
- "test-fail-continue" in self.settings.features:
- self.returncode = os.EX_OK
-
- portage._post_phase_userpriv_perms(self.settings)
-
-class EbuildPhase(CompositeTask):
-
- __slots__ = ("background", "pkg", "phase",
- "scheduler", "settings", "tree")
-
- _post_phase_cmds = portage._post_phase_cmds
-
- def _start(self):
-
- ebuild_process = EbuildProcess(background=self.background,
- pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
- settings=self.settings, tree=self.tree)
-
- self._start_task(ebuild_process, self._ebuild_exit)
-
- def _ebuild_exit(self, ebuild_process):
-
- if self.phase == "install":
- out = None
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- log_file = None
- if self.background and log_path is not None:
- log_file = open(log_path, 'a')
- out = log_file
- try:
- portage._check_build_log(self.settings, out=out)
- finally:
- if log_file is not None:
- log_file.close()
-
- if self._default_exit(ebuild_process) != os.EX_OK:
- self.wait()
- return
-
- settings = self.settings
-
- if self.phase == "install":
- portage._post_src_install_chost_fix(settings)
- portage._post_src_install_uid_fix(settings)
-
- post_phase_cmds = self._post_phase_cmds.get(self.phase)
- if post_phase_cmds is not None:
- post_phase = MiscFunctionsProcess(background=self.background,
- commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
- scheduler=self.scheduler, settings=settings)
- self._start_task(post_phase, self._post_phase_exit)
- return
-
- self.returncode = ebuild_process.returncode
- self._current_task = None
- self.wait()
-
- def _post_phase_exit(self, post_phase):
- if self._final_exit(post_phase) != os.EX_OK:
- writemsg("!!! post %s failed; exiting.\n" % self.phase,
- noiselevel=-1)
- self._current_task = None
- self.wait()
- return
-
-class EbuildBinpkg(EbuildProcess):
- """
- This assumes that src_install() has successfully completed.
- """
- __slots__ = ("_binpkg_tmpfile",)
-
- def _start(self):
- self.phase = "package"
- self.tree = "porttree"
- pkg = self.pkg
- root_config = pkg.root_config
- portdb = root_config.trees["porttree"].dbapi
- bintree = root_config.trees["bintree"]
- ebuild_path = portdb.findname(self.pkg.cpv)
- settings = self.settings
- debug = settings.get("PORTAGE_DEBUG") == "1"
-
- bintree.prevent_collision(pkg.cpv)
- binpkg_tmpfile = os.path.join(bintree.pkgdir,
- pkg.cpv + ".tbz2." + str(os.getpid()))
- self._binpkg_tmpfile = binpkg_tmpfile
- settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
- settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
-
- try:
- EbuildProcess._start(self)
- finally:
- settings.pop("PORTAGE_BINPKG_TMPFILE", None)
-
- def _set_returncode(self, wait_retval):
- EbuildProcess._set_returncode(self, wait_retval)
-
- pkg = self.pkg
- bintree = pkg.root_config.trees["bintree"]
- binpkg_tmpfile = self._binpkg_tmpfile
- if self.returncode == os.EX_OK:
- bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
-
-class EbuildMerge(SlotObject):
-
- __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
- "pkg", "pkg_count", "pkg_path", "pretend",
- "scheduler", "settings", "tree", "world_atom")
-
- def execute(self):
- root_config = self.pkg.root_config
- settings = self.settings
- retval = portage.merge(settings["CATEGORY"],
- settings["PF"], settings["D"],
- os.path.join(settings["PORTAGE_BUILDDIR"],
- "build-info"), root_config.root, settings,
- myebuild=settings["EBUILD"],
- mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
- vartree=root_config.trees["vartree"],
- prev_mtimes=self.ldpath_mtimes,
- scheduler=self.scheduler,
- blockers=self.find_blockers)
-
- if retval == os.EX_OK:
- self.world_atom(self.pkg)
- self._log_success()
-
- return retval
-
- def _log_success(self):
- pkg = self.pkg
- pkg_count = self.pkg_count
- pkg_path = self.pkg_path
- logger = self.logger
- if "noclean" not in self.settings.features:
- short_msg = "emerge: (%s of %s) %s Clean Post" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log((" === (%s of %s) " + \
- "Post-Build Cleaning (%s::%s)") % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
- short_msg=short_msg)
- logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
-
-class PackageUninstall(AsynchronousTask):
-
- __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
-
- def _start(self):
- try:
- unmerge(self.pkg.root_config, self.opts, "unmerge",
- [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
- clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
- writemsg_level=self._writemsg_level)
- except UninstallFailure, e:
- self.returncode = e.status
- else:
- self.returncode = os.EX_OK
- self.wait()
-
- def _writemsg_level(self, msg, level=0, noiselevel=0):
-
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- background = self.background
-
- if log_path is None:
- if not (background and level < logging.WARNING):
- portage.util.writemsg_level(msg,
- level=level, noiselevel=noiselevel)
- else:
- if not background:
- portage.util.writemsg_level(msg,
- level=level, noiselevel=noiselevel)
-
- f = open(log_path, 'a')
- try:
- f.write(msg)
- finally:
- f.close()
-
-class Binpkg(CompositeTask):
-
- __slots__ = ("find_blockers",
- "ldpath_mtimes", "logger", "opts",
- "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
- ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
- "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
-
- def _writemsg_level(self, msg, level=0, noiselevel=0):
-
- if not self.background:
- portage.util.writemsg_level(msg,
- level=level, noiselevel=noiselevel)
-
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- if log_path is not None:
- f = open(log_path, 'a')
- try:
- f.write(msg)
- finally:
- f.close()
-
- def _start(self):
-
- pkg = self.pkg
- settings = self.settings
- settings.setcpv(pkg)
- self._tree = "bintree"
- self._bintree = self.pkg.root_config.trees[self._tree]
- self._verify = not self.opts.pretend
-
- dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
- "portage", pkg.category, pkg.pf)
- self._build_dir = EbuildBuildDir(dir_path=dir_path,
- pkg=pkg, settings=settings)
- self._image_dir = os.path.join(dir_path, "image")
- self._infloc = os.path.join(dir_path, "build-info")
- self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
- settings["EBUILD"] = self._ebuild_path
- debug = settings.get("PORTAGE_DEBUG") == "1"
- portage.doebuild_environment(self._ebuild_path, "setup",
- settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
- settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
-
- # The prefetcher has already completed or it
- # could be running now. If it's running now,
- # wait for it to complete since it holds
- # a lock on the file being fetched. The
- # portage.locks functions are only designed
- # to work between separate processes. Since
- # the lock is held by the current process,
- # use the scheduler and fetcher methods to
- # synchronize with the fetcher.
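-		# Three cases follow: no prefetcher exists, the prefetcher is
-		# no longer alive (cancel it and move on), or it is still
-		# running and an exit listener is registered instead of
-		# blocking on the lock it holds.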
- prefetcher = self.prefetcher
- if prefetcher is None:
- pass
- elif not prefetcher.isAlive():
- prefetcher.cancel()
- elif prefetcher.poll() is None:
-
- waiting_msg = ("Fetching '%s' " + \
- "in the background. " + \
- "To view fetch progress, run `tail -f " + \
- "/var/log/emerge-fetch.log` in another " + \
- "terminal.") % prefetcher.pkg_path
- msg_prefix = colorize("GOOD", " * ")
- from textwrap import wrap
- waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
- for line in wrap(waiting_msg, 65))
- if not self.background:
- writemsg(waiting_msg, noiselevel=-1)
-
- self._current_task = prefetcher
- prefetcher.addExitListener(self._prefetch_exit)
- return
-
- self._prefetch_exit(prefetcher)
-
- def _prefetch_exit(self, prefetcher):
-
- pkg = self.pkg
- pkg_count = self.pkg_count
- if not (self.opts.pretend or self.opts.fetchonly):
- self._build_dir.lock()
- # If necessary, discard old log so that we don't
- # append to it.
- self._build_dir.clean_log()
-			# Initialize PORTAGE_LOG_FILE.
- portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
- fetcher = BinpkgFetcher(background=self.background,
- logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
- pretend=self.opts.pretend, scheduler=self.scheduler)
- pkg_path = fetcher.pkg_path
- self._pkg_path = pkg_path
-
- if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
-
- msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
- short_msg = "emerge: (%s of %s) %s Fetch" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- self.logger.log(msg, short_msg=short_msg)
- self._start_task(fetcher, self._fetcher_exit)
- return
-
- self._fetcher_exit(fetcher)
-
- def _fetcher_exit(self, fetcher):
-
- # The fetcher only has a returncode when
- # --getbinpkg is enabled.
- if fetcher.returncode is not None:
- self._fetched_pkg = True
- if self._default_exit(fetcher) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- if self.opts.pretend:
- self._current_task = None
- self.returncode = os.EX_OK
- self.wait()
- return
-
- verifier = None
- if self._verify:
- logfile = None
- if self.background:
- logfile = self.settings.get("PORTAGE_LOG_FILE")
- verifier = BinpkgVerifier(background=self.background,
- logfile=logfile, pkg=self.pkg)
- self._start_task(verifier, self._verifier_exit)
- return
-
- self._verifier_exit(verifier)
-
- def _verifier_exit(self, verifier):
- if verifier is not None and \
- self._default_exit(verifier) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- logger = self.logger
- pkg = self.pkg
- pkg_count = self.pkg_count
- pkg_path = self._pkg_path
-
- if self._fetched_pkg:
- self._bintree.inject(pkg.cpv, filename=pkg_path)
-
- if self.opts.fetchonly:
- self._current_task = None
- self.returncode = os.EX_OK
- self.wait()
- return
-
- msg = " === (%s of %s) Merging Binary (%s::%s)" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
- short_msg = "emerge: (%s of %s) %s Merge Binary" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv)
- logger.log(msg, short_msg=short_msg)
-
- phase = "clean"
- settings = self.settings
- ebuild_phase = EbuildPhase(background=self.background,
- pkg=pkg, phase=phase, scheduler=self.scheduler,
- settings=settings, tree=self._tree)
-
- self._start_task(ebuild_phase, self._clean_exit)
-
- def _clean_exit(self, clean_phase):
- if self._default_exit(clean_phase) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- dir_path = self._build_dir.dir_path
-
- infloc = self._infloc
- pkg = self.pkg
- pkg_path = self._pkg_path
-
- dir_mode = 0755
- for mydir in (dir_path, self._image_dir, infloc):
- portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
- gid=portage.data.portage_gid, mode=dir_mode)
-
- # This initializes PORTAGE_LOG_FILE.
- portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
- self._writemsg_level(">>> Extracting info\n")
-
- pkg_xpak = portage.xpak.tbz2(self._pkg_path)
- check_missing_metadata = ("CATEGORY", "PF")
- missing_metadata = set()
- for k in check_missing_metadata:
- v = pkg_xpak.getfile(k)
- if not v:
- missing_metadata.add(k)
-
- pkg_xpak.unpackinfo(infloc)
- for k in missing_metadata:
- if k == "CATEGORY":
- v = pkg.category
- elif k == "PF":
- v = pkg.pf
- else:
- continue
-
- f = open(os.path.join(infloc, k), 'wb')
- try:
- f.write(v + "\n")
- finally:
- f.close()
-
- # Store the md5sum in the vdb.
- f = open(os.path.join(infloc, "BINPKGMD5"), "w")
- try:
- f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
- finally:
- f.close()
-
- # This gives bashrc users an opportunity to do various things
- # such as remove binary packages after they're installed.
- settings = self.settings
- settings.setcpv(self.pkg)
- settings["PORTAGE_BINPKG_FILE"] = pkg_path
- settings.backup_changes("PORTAGE_BINPKG_FILE")
-
- phase = "setup"
- setup_phase = EbuildPhase(background=self.background,
- pkg=self.pkg, phase=phase, scheduler=self.scheduler,
- settings=settings, tree=self._tree)
-
- setup_phase.addExitListener(self._setup_exit)
- self._current_task = setup_phase
- self.scheduler.scheduleSetup(setup_phase)
-
- def _setup_exit(self, setup_phase):
- if self._default_exit(setup_phase) != os.EX_OK:
- self._unlock_builddir()
- self.wait()
- return
-
- extractor = BinpkgExtractorAsync(background=self.background,
- image_dir=self._image_dir,
- pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
- self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
- self._start_task(extractor, self._extractor_exit)
-
- def _extractor_exit(self, extractor):
- if self._final_exit(extractor) != os.EX_OK:
- self._unlock_builddir()
- writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
- noiselevel=-1)
- self.wait()
-
- def _unlock_builddir(self):
- if self.opts.pretend or self.opts.fetchonly:
- return
- portage.elog.elog_process(self.pkg.cpv, self.settings)
- self._build_dir.unlock()
-
- def install(self):
-
- # This gives bashrc users an opportunity to do various things
- # such as remove binary packages after they're installed.
- settings = self.settings
- settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
- settings.backup_changes("PORTAGE_BINPKG_FILE")
-
- merge = EbuildMerge(find_blockers=self.find_blockers,
- ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
- pkg=self.pkg, pkg_count=self.pkg_count,
- pkg_path=self._pkg_path, scheduler=self.scheduler,
- settings=settings, tree=self._tree, world_atom=self.world_atom)
-
- try:
- retval = merge.execute()
- finally:
- settings.pop("PORTAGE_BINPKG_FILE", None)
- self._unlock_builddir()
- return retval
-
-class BinpkgFetcher(SpawnProcess):
-
- __slots__ = ("pkg", "pretend",
- "locked", "pkg_path", "_lock_obj")
-
- def __init__(self, **kwargs):
- SpawnProcess.__init__(self, **kwargs)
- pkg = self.pkg
- self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
-
- def _start(self):
-
- if self.cancelled:
- return
-
- pkg = self.pkg
- pretend = self.pretend
- bintree = pkg.root_config.trees["bintree"]
- settings = bintree.settings
- use_locks = "distlocks" in settings.features
- pkg_path = self.pkg_path
-
- if not pretend:
- portage.util.ensure_dirs(os.path.dirname(pkg_path))
- if use_locks:
- self.lock()
- exists = os.path.exists(pkg_path)
- resume = exists and os.path.basename(pkg_path) in bintree.invalids
- if not (pretend or resume):
- # Remove existing file or broken symlink.
- try:
- os.unlink(pkg_path)
- except OSError:
- pass
-
- # urljoin doesn't work correctly with
- # unrecognized protocols like sftp
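-		# (e.g. urlparse.urljoin("sftp://host/pkgs", "foo.tbz2") returns
-		# just "foo.tbz2", because "sftp" is not in urlparse.uses_relative)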
- if bintree._remote_has_index:
- rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
- if not rel_uri:
- rel_uri = pkg.cpv + ".tbz2"
- uri = bintree._remote_base_uri.rstrip("/") + \
- "/" + rel_uri.lstrip("/")
- else:
- uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
- "/" + pkg.pf + ".tbz2"
-
- if pretend:
- portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
- self.returncode = os.EX_OK
- self.wait()
- return
-
- protocol = urlparse.urlparse(uri)[0]
- fcmd_prefix = "FETCHCOMMAND"
- if resume:
- fcmd_prefix = "RESUMECOMMAND"
- fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
- if not fcmd:
- fcmd = settings.get(fcmd_prefix)
-
- fcmd_vars = {
- "DISTDIR" : os.path.dirname(pkg_path),
- "URI" : uri,
- "FILE" : os.path.basename(pkg_path)
- }
-
- fetch_env = dict(settings.iteritems())
- fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
- for x in shlex.split(fcmd)]
-
- if self.fd_pipes is None:
- self.fd_pipes = {}
- fd_pipes = self.fd_pipes
-
- # Redirect all output to stdout since some fetchers like
- # wget pollute stderr (if portage detects a problem then it
-		# can send its own message to stderr).
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stdout.fileno())
-
- self.args = fetch_args
- self.env = fetch_env
- SpawnProcess._start(self)
-
- def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
- if self.returncode == os.EX_OK:
- # If possible, update the mtime to match the remote package if
- # the fetcher didn't already do it automatically.
- bintree = self.pkg.root_config.trees["bintree"]
- if bintree._remote_has_index:
- remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
- if remote_mtime is not None:
- try:
- remote_mtime = long(remote_mtime)
- except ValueError:
- pass
- else:
- try:
- local_mtime = long(os.stat(self.pkg_path).st_mtime)
- except OSError:
- pass
- else:
- if remote_mtime != local_mtime:
- try:
- os.utime(self.pkg_path,
- (remote_mtime, remote_mtime))
- except OSError:
- pass
-
- if self.locked:
- self.unlock()
-
- def lock(self):
- """
- This raises an AlreadyLocked exception if lock() is called
-		while a lock is already held. To avoid this, call unlock()
-		or check the "locked" attribute before calling lock().
- """
- if self._lock_obj is not None:
- raise self.AlreadyLocked((self._lock_obj,))
+ return True
+ return False
- self._lock_obj = portage.locks.lockfile(
- self.pkg_path, wantnewlockfile=1)
- self.locked = True
+ def __ge__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+ return True
+ return False
- class AlreadyLocked(portage.exception.PortageException):
- pass
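+# All metadata keys that Package instances may carry: the auxdb keys
+# minus UNUSED_* padding and the long-obsolete CDEPEND, plus whatever
+# Package itself requires.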
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+ if not x.startswith("UNUSED_"))
+_all_metadata_keys.discard("CDEPEND")
+_all_metadata_keys.update(Package.metadata_keys)
- def unlock(self):
- if self._lock_obj is None:
- return
- portage.locks.unlockfile(self._lock_obj)
- self._lock_obj = None
- self.locked = False
+from portage.cache.mappings import slot_dict_class
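+# slot_dict_class() generates a mapping class that stores values in
+# __slots__ rather than a per-instance dict, which keeps the many
+# Package metadata objects created by the depgraph small.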
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
-class BinpkgVerifier(AsynchronousTask):
- __slots__ = ("logfile", "pkg",)
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+ """
+ Detect metadata updates and synchronize Package attributes.
+ """
- def _start(self):
- """
- Note: Unlike a normal AsynchronousTask.start() method,
-		this one does all its work synchronously. The returncode
- attribute will be set before it returns.
- """
+ __slots__ = ("_pkg",)
+ _wrapped_keys = frozenset(
+ ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
- pkg = self.pkg
- root_config = pkg.root_config
- bintree = root_config.trees["bintree"]
- rval = os.EX_OK
- stdout_orig = sys.stdout
- stderr_orig = sys.stderr
- log_file = None
- if self.background and self.logfile is not None:
- log_file = open(self.logfile, 'a')
- try:
- if log_file is not None:
- sys.stdout = log_file
- sys.stderr = log_file
- try:
- bintree.digestCheck(pkg)
- except portage.exception.FileNotFound:
- writemsg("!!! Fetching Binary failed " + \
- "for '%s'\n" % pkg.cpv, noiselevel=-1)
- rval = 1
- except portage.exception.DigestException, e:
- writemsg("\n!!! Digest verification failed:\n",
- noiselevel=-1)
- writemsg("!!! %s\n" % e.value[0],
- noiselevel=-1)
- writemsg("!!! Reason: %s\n" % e.value[1],
- noiselevel=-1)
- writemsg("!!! Got: %s\n" % e.value[2],
- noiselevel=-1)
- writemsg("!!! Expected: %s\n" % e.value[3],
- noiselevel=-1)
- rval = 1
- if rval != os.EX_OK:
- pkg_path = bintree.getname(pkg.cpv)
- head, tail = os.path.split(pkg_path)
- temp_filename = portage._checksum_failure_temp_file(head, tail)
- writemsg("File renamed to '%s'\n" % (temp_filename,),
- noiselevel=-1)
- finally:
- sys.stdout = stdout_orig
- sys.stderr = stderr_orig
- if log_file is not None:
- log_file.close()
+ def __init__(self, pkg, metadata):
+ _PackageMetadataWrapperBase.__init__(self)
+ self._pkg = pkg
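+		# update() assigns through __setitem__, so the _set_* hooks
+		# below also fire for the initial metadata.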
+ self.update(metadata)
- self.returncode = rval
- self.wait()
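+	# Dispatch writes of wrapped keys to the matching _set_<key>()
+	# hook; e.g. metadata["USE"] = "foo" also refreshes pkg.use.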
+ def __setitem__(self, k, v):
+ _PackageMetadataWrapperBase.__setitem__(self, k, v)
+ if k in self._wrapped_keys:
+ getattr(self, "_set_" + k.lower())(k, v)
-class BinpkgPrefetcher(CompositeTask):
+ def _set_inherited(self, k, v):
+ if isinstance(v, basestring):
+ v = frozenset(v.split())
+ self._pkg.inherited = v
- __slots__ = ("pkg",) + \
- ("pkg_path", "_bintree",)
+ def _set_iuse(self, k, v):
+ self._pkg.iuse = self._pkg._iuse(
+ v.split(), self._pkg.root_config.iuse_implicit)
- def _start(self):
- self._bintree = self.pkg.root_config.trees["bintree"]
- fetcher = BinpkgFetcher(background=self.background,
- logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
- scheduler=self.scheduler)
- self.pkg_path = fetcher.pkg_path
- self._start_task(fetcher, self._fetcher_exit)
+ def _set_slot(self, k, v):
+ self._pkg.slot = v
- def _fetcher_exit(self, fetcher):
+ def _set_use(self, k, v):
+ self._pkg.use = self._pkg._use(v.split())
- if self._default_exit(fetcher) != os.EX_OK:
- self.wait()
- return
+ def _set_counter(self, k, v):
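+		# COUNTER comes from the vdb as a string and may be empty or
+		# malformed; fall back to 0 instead of raising.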
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.counter = v
- verifier = BinpkgVerifier(background=self.background,
- logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
- self._start_task(verifier, self._verifier_exit)
+ def _set__mtime_(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.mtime = v
- def _verifier_exit(self, verifier):
- if self._default_exit(verifier) != os.EX_OK:
- self.wait()
- return
+class PackageUninstall(AsynchronousTask):
- self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+ __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
- self._current_task = None
- self.returncode = os.EX_OK
+ def _start(self):
+ try:
+ unmerge(self.pkg.root_config, self.opts, "unmerge",
+ [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
+ clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
+ writemsg_level=self._writemsg_level)
+ except UninstallFailure, e:
+ self.returncode = e.status
+ else:
+ self.returncode = os.EX_OK
self.wait()
-class BinpkgExtractorAsync(SpawnProcess):
-
- __slots__ = ("image_dir", "pkg", "pkg_path")
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
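+		# Write to the console and/or the log file, depending on
+		# whether this task runs in the background and on the level.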
- _shell_binary = portage.const.BASH_BINARY
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.background
- def _start(self):
- self.args = [self._shell_binary, "-c",
- "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
- (portage._shell_quote(self.pkg_path),
- portage._shell_quote(self.image_dir))]
+ if log_path is None:
+ if not (background and level < logging.WARNING):
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
+ else:
+ if not background:
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
- self.env = self.pkg.root_config.settings.environ()
- SpawnProcess._start(self)
+ f = open(log_path, 'a')
+ try:
+ f.write(msg)
+ finally:
+ f.close()
class MergeListItem(CompositeTask):
retval = self._install_task.install()
return retval
-class PackageMerge(AsynchronousTask):
- """
- TODO: Implement asynchronous merge so that the scheduler can
- run while a merge is executing.
- """
-
- __slots__ = ("merge",)
-
- def _start(self):
-
- pkg = self.merge.pkg
- pkg_count = self.merge.pkg_count
-
- if pkg.installed:
- action_desc = "Uninstalling"
- preposition = "from"
- counter_str = ""
- else:
- action_desc = "Installing"
- preposition = "to"
- counter_str = "(%s of %s) " % \
- (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
- colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
-
- msg = "%s %s%s" % \
- (action_desc,
- counter_str,
- colorize("GOOD", pkg.cpv))
-
- if pkg.root != "/":
- msg += " %s %s" % (preposition, pkg.root)
-
- if not self.merge.build_opts.fetchonly and \
- not self.merge.build_opts.pretend and \
- not self.merge.build_opts.buildpkgonly:
- self.merge.statusMessage(msg)
-
- self.returncode = self.merge.merge()
- self.wait()
-
-class DependencyArg(object):
- def __init__(self, arg=None, root_config=None):
- self.arg = arg
- self.root_config = root_config
-
- def __str__(self):
- return str(self.arg)
-
-class AtomArg(DependencyArg):
- def __init__(self, atom=None, **kwargs):
- DependencyArg.__init__(self, **kwargs)
- self.atom = atom
- if not isinstance(self.atom, portage.dep.Atom):
- self.atom = portage.dep.Atom(self.atom)
- self.set = (self.atom, )
-
-class PackageArg(DependencyArg):
- def __init__(self, package=None, **kwargs):
- DependencyArg.__init__(self, **kwargs)
- self.package = package
- self.atom = portage.dep.Atom("=" + package.cpv)
- self.set = (self.atom, )
-
-class SetArg(DependencyArg):
- def __init__(self, set=None, **kwargs):
- DependencyArg.__init__(self, **kwargs)
- self.set = set
- self.name = self.arg[len(SETPREFIX):]
-
-class Dependency(SlotObject):
- __slots__ = ("atom", "blocker", "depth",
- "parent", "onlydeps", "priority", "root")
- def __init__(self, **kwargs):
- SlotObject.__init__(self, **kwargs)
- if self.priority is None:
- self.priority = DepPriority()
- if self.depth is None:
- self.depth = 0
-
-class BlockerCache(portage.cache.mappings.MutableMapping):
- """This caches blockers of installed packages so that dep_check does not
- have to be done for every single installed package on every invocation of
- emerge. The cache is invalidated whenever it is detected that something
- has changed that might alter the results of dep_check() calls:
- 1) the set of installed packages (including COUNTER) has changed
- 2) the old-style virtuals have changed
- """
-
-	# Number of newly modified cache entries required to trigger a disk
-	# write, since it's wasteful to rewrite it for every vdb change.
- _cache_threshold = 5
-
- class BlockerData(object):
-
- __slots__ = ("__weakref__", "atoms", "counter")
-
- def __init__(self, counter, atoms):
- self.counter = counter
- self.atoms = atoms
-
- def __init__(self, myroot, vardb):
- self._vardb = vardb
- self._virtuals = vardb.settings.getvirtuals()
- self._cache_filename = os.path.join(myroot,
- portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
- self._cache_version = "1"
- self._cache_data = None
- self._modified = set()
- self._load()
-
- def _load(self):
- try:
- f = open(self._cache_filename, mode='rb')
- mypickle = pickle.Unpickler(f)
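-			# With cPickle, setting find_global to None makes the
-			# unpickler reject any pickle that references a class or
-			# function, so a corrupt or malicious cache file cannot
-			# run code when loaded.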
- try:
- mypickle.find_global = None
- except AttributeError:
- # TODO: If py3k, override Unpickler.find_class().
- pass
- self._cache_data = mypickle.load()
- f.close()
- del f
- except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
- if isinstance(e, pickle.UnpicklingError):
- writemsg("!!! Error loading '%s': %s\n" % \
- (self._cache_filename, str(e)), noiselevel=-1)
- del e
-
- cache_valid = self._cache_data and \
- isinstance(self._cache_data, dict) and \
- self._cache_data.get("version") == self._cache_version and \
- isinstance(self._cache_data.get("blockers"), dict)
- if cache_valid:
- # Validate all the atoms and counters so that
- # corruption is detected as soon as possible.
- invalid_items = set()
- for k, v in self._cache_data["blockers"].iteritems():
- if not isinstance(k, basestring):
- invalid_items.add(k)
- continue
- try:
- if portage.catpkgsplit(k) is None:
- invalid_items.add(k)
- continue
- except portage.exception.InvalidData:
- invalid_items.add(k)
- continue
- if not isinstance(v, tuple) or \
- len(v) != 2:
- invalid_items.add(k)
- continue
- counter, atoms = v
- if not isinstance(counter, (int, long)):
- invalid_items.add(k)
- continue
- if not isinstance(atoms, (list, tuple)):
- invalid_items.add(k)
- continue
- invalid_atom = False
- for atom in atoms:
- if not isinstance(atom, basestring):
- invalid_atom = True
- break
- if atom[:1] != "!" or \
- not portage.isvalidatom(
- atom, allow_blockers=True):
- invalid_atom = True
- break
- if invalid_atom:
- invalid_items.add(k)
- continue
-
- for k in invalid_items:
- del self._cache_data["blockers"][k]
- if not self._cache_data["blockers"]:
- cache_valid = False
-
- if not cache_valid:
- self._cache_data = {"version":self._cache_version}
- self._cache_data["blockers"] = {}
- self._cache_data["virtuals"] = self._virtuals
- self._modified.clear()
-
- def flush(self):
- """If the current user has permission and the internal blocker cache
- been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has proccessed blockers for all installed packages.
- Currently, the cache is only written if the user has superuser
- privileges (since that's required to obtain a lock), but all users
- have read access and benefit from faster blocker lookups (as long as
- the entire cache is still valid). The cache is stored as a pickled
- dict object with the following format:
-
- {
- version : "1",
- "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
- "virtuals" : vardb.settings.getvirtuals()
- }
- """
- if len(self._modified) >= self._cache_threshold and \
- secpass >= 2:
- try:
- f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
- pickle.dump(self._cache_data, f, protocol=2)
- f.close()
- portage.util.apply_secpass_permissions(
- self._cache_filename, gid=portage.portage_gid, mode=0644)
- except (IOError, OSError), e:
- pass
- self._modified.clear()
-
- def __setitem__(self, cpv, blocker_data):
- """
- Update the cache and mark it as modified for a future call to
- self.flush().
-
- @param cpv: Package for which to cache blockers.
- @type cpv: String
- @param blocker_data: An object with counter and atoms attributes.
- @type blocker_data: BlockerData
- """
- self._cache_data["blockers"][cpv] = \
- (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
- self._modified.add(cpv)
-
- def __iter__(self):
- if self._cache_data is None:
- # triggered by python-trace
- return iter([])
- return iter(self._cache_data["blockers"])
-
- def __delitem__(self, cpv):
- del self._cache_data["blockers"][cpv]
-
- def __getitem__(self, cpv):
- """
- @rtype: BlockerData
- @returns: An object with counter and atoms attributes.
- """
- return self.BlockerData(*self._cache_data["blockers"][cpv])
-
class BlockerDB(object):
def __init__(self, root_config):
msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
-class PackageVirtualDbapi(portage.dbapi):
- """
- A dbapi-like interface class that represents the state of the installed
- package database as new packages are installed, replacing any packages
- that previously existed in the same slot. The main difference between
- this class and fakedbapi is that this one uses Package instances
- internally (passed in via cpv_inject() and cpv_remove() calls).
- """
- def __init__(self, settings):
- portage.dbapi.__init__(self)
- self.settings = settings
- self._match_cache = {}
- self._cp_map = {}
- self._cpv_map = {}
-
- def clear(self):
- """
- Remove all packages.
- """
- if self._cpv_map:
- self._clear_cache()
- self._cp_map.clear()
- self._cpv_map.clear()
-
- def copy(self):
- obj = PackageVirtualDbapi(self.settings)
- obj._match_cache = self._match_cache.copy()
- obj._cp_map = self._cp_map.copy()
- for k, v in obj._cp_map.iteritems():
- obj._cp_map[k] = v[:]
- obj._cpv_map = self._cpv_map.copy()
- return obj
-
- def __iter__(self):
- return self._cpv_map.itervalues()
-
- def __contains__(self, item):
- existing = self._cpv_map.get(item.cpv)
- if existing is not None and \
- existing == item:
- return True
- return False
-
- def get(self, item, default=None):
- cpv = getattr(item, "cpv", None)
- if cpv is None:
- if len(item) != 4:
- return default
- type_name, root, cpv, operation = item
-
- existing = self._cpv_map.get(cpv)
- if existing is not None and \
- existing == item:
- return existing
- return default
-
- def match_pkgs(self, atom):
- return [self._cpv_map[cpv] for cpv in self.match(atom)]
-
- def _clear_cache(self):
- if self._categories is not None:
- self._categories = None
- if self._match_cache:
- self._match_cache = {}
-
- def match(self, origdep, use_cache=1):
- result = self._match_cache.get(origdep)
- if result is not None:
- return result[:]
- result = portage.dbapi.match(self, origdep, use_cache=use_cache)
- self._match_cache[origdep] = result
- return result[:]
-
- def cpv_exists(self, cpv):
- return cpv in self._cpv_map
-
- def cp_list(self, mycp, use_cache=1):
- cachelist = self._match_cache.get(mycp)
- # cp_list() doesn't expand old-style virtuals
- if cachelist and cachelist[0].startswith(mycp):
- return cachelist[:]
- cpv_list = self._cp_map.get(mycp)
- if cpv_list is None:
- cpv_list = []
- else:
- cpv_list = [pkg.cpv for pkg in cpv_list]
- self._cpv_sort_ascending(cpv_list)
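-		# Never cache an empty result for a virtual: match() shares
-		# _match_cache and expands old-style virtuals, so it may
-		# produce matches that cp_list() cannot.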
- if not (not cpv_list and mycp.startswith("virtual/")):
- self._match_cache[mycp] = cpv_list
- return cpv_list[:]
-
- def cp_all(self):
- return list(self._cp_map)
-
- def cpv_all(self):
- return list(self._cpv_map)
-
- def cpv_inject(self, pkg):
- cp_list = self._cp_map.get(pkg.cp)
- if cp_list is None:
- cp_list = []
- self._cp_map[pkg.cp] = cp_list
- e_pkg = self._cpv_map.get(pkg.cpv)
- if e_pkg is not None:
- if e_pkg == pkg:
- return
- self.cpv_remove(e_pkg)
- for e_pkg in cp_list:
- if e_pkg.slot_atom == pkg.slot_atom:
- if e_pkg == pkg:
- return
- self.cpv_remove(e_pkg)
- break
- cp_list.append(pkg)
- self._cpv_map[pkg.cpv] = pkg
- self._clear_cache()
-
- def cpv_remove(self, pkg):
- old_pkg = self._cpv_map.get(pkg.cpv)
- if old_pkg != pkg:
- raise KeyError(pkg)
- self._cp_map[pkg.cp].remove(pkg)
- del self._cpv_map[pkg.cpv]
- self._clear_cache()
-
- def aux_get(self, cpv, wants):
- metadata = self._cpv_map[cpv].metadata
- return [metadata.get(x, "") for x in wants]
-
- def aux_update(self, cpv, values):
- self._cpv_map[cpv].metadata.update(values)
- self._clear_cache()
-
class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
metadata = self._cpv_pkg_map[cpv].metadata
return [metadata.get(x, "") for x in wants]
-class RepoDisplay(object):
- def __init__(self, roots):
- self._shown_repos = {}
- self._unknown_repo = False
- repo_paths = set()
- for root_config in roots.itervalues():
- portdir = root_config.settings.get("PORTDIR")
- if portdir:
- repo_paths.add(portdir)
- overlays = root_config.settings.get("PORTDIR_OVERLAY")
- if overlays:
- repo_paths.update(overlays.split())
- repo_paths = list(repo_paths)
- self._repo_paths = repo_paths
- self._repo_paths_real = [ os.path.realpath(repo_path) \
- for repo_path in repo_paths ]
-
- # pre-allocate index for PORTDIR so that it always has index 0.
- for root_config in roots.itervalues():
- portdb = root_config.trees["porttree"].dbapi
- portdir = portdb.porttree_root
- if portdir:
- self.repoStr(portdir)
-
- def repoStr(self, repo_path_real):
- real_index = -1
- if repo_path_real:
- real_index = self._repo_paths_real.index(repo_path_real)
- if real_index == -1:
- s = "?"
- self._unknown_repo = True
- else:
- shown_repos = self._shown_repos
- repo_paths = self._repo_paths
- repo_path = repo_paths[real_index]
- index = shown_repos.get(repo_path)
- if index is None:
- index = len(shown_repos)
- shown_repos[repo_path] = index
- s = str(index)
- return s
-
- def __str__(self):
- output = []
- shown_repos = self._shown_repos
- unknown_repo = self._unknown_repo
- if shown_repos or self._unknown_repo:
- output.append("Portage tree and overlays:\n")
- show_repo_paths = list(shown_repos)
- for repo_path, repo_index in shown_repos.iteritems():
- show_repo_paths[repo_index] = repo_path
- if show_repo_paths:
- for index, repo_path in enumerate(show_repo_paths):
- output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
- if unknown_repo:
- output.append(" "+teal("[?]") + \
- " indicates that the source repository could not be determined\n")
- return "".join(output)
-
class PackageCounters(object):
def __init__(self):
(self.blocks - self.blocks_satisfied))
return "".join(myoutput)
-class UseFlagDisplay(object):
-
- __slots__ = ('name', 'enabled', 'forced')
-
- def __init__(self, name, enabled, forced):
- self.name = name
- self.enabled = enabled
- self.forced = forced
-
- def __str__(self):
- s = self.name
- if self.enabled:
- s = red(s)
- else:
- s = '-' + s
- s = blue(s)
- if self.forced:
- s = '(%s)' % s
- return s
-
- def _cmp_combined(a, b):
- """
- Sort by name, combining enabled and disabled flags.
- """
- return (a.name > b.name) - (a.name < b.name)
-
- sort_combined = cmp_sort_key(_cmp_combined)
- del _cmp_combined
-
- def _cmp_separated(a, b):
- """
- Sort by name, separating enabled flags from disabled flags.
- """
- enabled_diff = b.enabled - a.enabled
- if enabled_diff:
- return enabled_diff
- return (a.name > b.name) - (a.name < b.name)
-
- sort_separated = cmp_sort_key(_cmp_separated)
- del _cmp_separated
-
-class PollSelectAdapter(PollConstants):
-
- """
- Use select to emulate a poll object, for
- systems that don't support poll().
- """
-
- def __init__(self):
- self._registered = {}
- self._select_args = [[], [], []]
-
- def register(self, fd, *args):
- """
- Only POLLIN is currently supported!
- """
- if len(args) > 1:
- raise TypeError(
- "register expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- eventmask = PollConstants.POLLIN | \
- PollConstants.POLLPRI | PollConstants.POLLOUT
- if args:
- eventmask = args[0]
-
- self._registered[fd] = eventmask
- self._select_args = None
-
- def unregister(self, fd):
- self._select_args = None
- del self._registered[fd]
-
- def poll(self, *args):
- if len(args) > 1:
- raise TypeError(
- "poll expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- timeout = None
- if args:
- timeout = args[0]
-
- select_args = self._select_args
- if select_args is None:
- select_args = [self._registered.keys(), [], []]
-
- if timeout is not None:
- select_args = select_args[:]
- # Translate poll() timeout args to select() timeout args:
- #
- # | units | value(s) for indefinite block
- # ---------|--------------|------------------------------
- # poll | milliseconds | omitted, negative, or None
- # ---------|--------------|------------------------------
- # select | seconds | omitted
- # ---------|--------------|------------------------------
-
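-			# select() takes seconds, so convert from milliseconds; note
-			# that integer division truncates sub-second timeouts under
-			# Python 2.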
- if timeout is not None and timeout < 0:
- timeout = None
- if timeout is not None:
- select_args.append(timeout / 1000)
-
- select_events = select.select(*select_args)
- poll_events = []
- for fd in select_events[0]:
- poll_events.append((fd, PollConstants.POLLIN))
- return poll_events
-
-class SequentialTaskQueue(SlotObject):
-
- __slots__ = ("max_jobs", "running_tasks") + \
- ("_dirty", "_scheduling", "_task_queue")
-
- def __init__(self, **kwargs):
- SlotObject.__init__(self, **kwargs)
- self._task_queue = deque()
- self.running_tasks = set()
- if self.max_jobs is None:
- self.max_jobs = 1
- self._dirty = True
-
- def add(self, task):
- self._task_queue.append(task)
- self._dirty = True
-
- def addFront(self, task):
- self._task_queue.appendleft(task)
- self._dirty = True
-
- def schedule(self):
-
- if not self._dirty:
- return False
-
- if not self:
- return False
-
- if self._scheduling:
- # Ignore any recursive schedule() calls triggered via
- # self._task_exit().
- return False
-
- self._scheduling = True
-
- task_queue = self._task_queue
- running_tasks = self.running_tasks
- max_jobs = self.max_jobs
- state_changed = False
-
- while task_queue and \
- (max_jobs is True or len(running_tasks) < max_jobs):
- task = task_queue.popleft()
- cancelled = getattr(task, "cancelled", None)
- if not cancelled:
- running_tasks.add(task)
- task.addExitListener(self._task_exit)
- task.start()
- state_changed = True
-
- self._dirty = False
- self._scheduling = False
-
- return state_changed
-
- def _task_exit(self, task):
- """
- Since we can always rely on exit listeners being called, the set of
- running tasks is always pruned automatically and there is never any need
- to actively prune it.
- """
- self.running_tasks.remove(task)
- if self._task_queue:
- self._dirty = True
-
- def clear(self):
- self._task_queue.clear()
- running_tasks = self.running_tasks
- while running_tasks:
- task = running_tasks.pop()
- task.removeExitListener(self._task_exit)
- task.cancel()
- self._dirty = False
-
- def __nonzero__(self):
- return bool(self._task_queue or self.running_tasks)
-
- def __len__(self):
- return len(self._task_queue) + len(self.running_tasks)
_can_poll_device = None
if self.xterm_titles:
xtermTitle(" ".join(plain_output.split()))
-class ProgressHandler(object):
- def __init__(self):
- self.curval = 0
- self.maxval = 0
- self._last_update = 0
- self.min_latency = 0.2
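-		# Throttle display() so it refreshes at most once per
-		# min_latency seconds.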
-
- def onProgress(self, maxval, curval):
- self.maxval = maxval
- self.curval = curval
- cur_time = time.time()
- if cur_time - self._last_update >= self.min_latency:
- self._last_update = cur_time
- self.display()
-
- def display(self):
- raise NotImplementedError(self)
-
class Scheduler(PollScheduler):
_opts_ignore_blockers = \