__slots__ = ("built", "cpv", "depth",
"installed", "metadata", "onlydeps", "operation",
"root", "type_name",
- "cp", "cpv_slot", "pv_split", "slot_atom")
+ "category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")
+
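+ # Metadata keys that every Package instance is expected to carry;
+ # shared with FakeVartree and with _mydbapi_keys elsewhere in this patch.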
+ metadata_keys = [
+ "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+ "repository", "RESTRICT", "SLOT", "USE"]
+
def __init__(self, **kwargs):
Task.__init__(self, **kwargs)
self.cp = portage.cpv_getkey(self.cpv)
self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
- self.pv_split = portage.catpkgsplit(self.cpv)[1:]
+ cpv_parts = portage.catpkgsplit(self.cpv)
+ self.category = cpv_parts[0]
+ self.pv_split = cpv_parts[1:]
+ self.pf = self.cpv.replace(self.category + "/", "", 1)
def _get_hash_key(self):
hash_key = getattr(self, "_hash_key", None)
2) the old-style virtuals have changed
"""
class BlockerData(object):
+
+ __slots__ = ("__weakref__", "atoms", "counter")
+
def __init__(self, counter, atoms):
self.counter = counter
self.atoms = atoms
an AttributeError."""
return list(self)
+class BlockerDB(object):
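+ """Answers the question: which installed packages block a given
+ new package? Blocker atoms from the *DEPEND of installed packages
+ are kept in a BlockerCache so they do not have to be recomputed
+ on every merge."""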
+
+ def __init__(self, vartree, portdb):
+ self._vartree = vartree
+ self._portdb = portdb
+ self._blocker_cache = \
+ BlockerCache(self._vartree.root, self._vartree.dbapi)
+ self._dep_check_trees = { self._vartree.root : {
+ "porttree" : self._vartree,
+ "vartree" : self._vartree,
+ }}
+ self._installed_pkgs = None
+
+ def findInstalledBlockers(self, new_pkg):
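+ """Return the set of installed packages whose blocker atoms
+ match new_pkg."""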
+ self._update_cache()
+ blocker_parents = digraph()
+ blocker_atoms = []
+ for pkg in self._installed_pkgs:
+ for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
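+ # Strip the leading "!" so the atom can be matched
+ # like an ordinary dependency atom.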
+ blocker_atom = blocker_atom[1:]
+ blocker_atoms.append(blocker_atom)
+ blocker_parents.add(blocker_atom, pkg)
+
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ blocking_pkgs = set()
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+ return blocking_pkgs
+
+ def _update_cache(self):
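+ """Bring the blocker cache up to date: recompute blocker
+ atoms from *DEPEND for installed packages whose COUNTER has
+ changed, and drop cache entries for packages that are no
+ longer installed."""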
+ blocker_cache = self._blocker_cache
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+ dep_check_trees = self._dep_check_trees
+ settings = self._vartree.settings
+ stale_cache = set(blocker_cache)
+ fake_vartree = \
+ FakeVartree(self._vartree,
+ self._portdb, Package.metadata_keys, {})
+ vardb = fake_vartree.dbapi
+ self._installed_pkgs = list(vardb)
+
+ for inst_pkg in self._installed_pkgs:
+ stale_cache.discard(inst_pkg.cpv)
+ cached_blockers = blocker_cache.get(inst_pkg.cpv)
+ if cached_blockers is not None and \
+ cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers = None
+ if cached_blockers is not None:
+ blocker_atoms = cached_blockers.atoms
+ else:
+ myuse = inst_pkg.metadata["USE"].split()
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+ try:
+ portage.dep._dep_check_strict = False
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=myuse,
+ trees=dep_check_trees, myroot=inst_pkg.root)
+ finally:
+ portage.dep._dep_check_strict = True
+ if not success:
+ pkg_location = os.path.join(inst_pkg.root,
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+ (pkg_location, atoms), noiselevel=-1)
+ continue
+
+ blocker_atoms = [atom for atom in atoms \
+ if atom.startswith("!")]
+ blocker_atoms.sort()
+ counter = long(inst_pkg.metadata["COUNTER"])
+ blocker_cache[inst_pkg.cpv] = \
+ blocker_cache.BlockerData(counter, blocker_atoms)
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
from formatter import AbstractFormatter, DumbWriter
"binary":"bintree",
"installed":"vartree"}
- _mydbapi_keys = [
- "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
- "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
- "repository", "RESTRICT", "SLOT", "USE"]
+ _mydbapi_keys = Package.metadata_keys
_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
return -1
myblocker_uninstalls = self._blocker_uninstalls.copy()
retlist=[]
+ # Contains uninstall tasks that have been scheduled to
+ # occur after overlapping blockers have been installed.
+ scheduled_uninstalls = set()
# Contains any Uninstall tasks that have been ignored
# in order to avoid the circular deps code path. These
# correspond to blocker conflicts that could not be
selected_nodes = list(selected_nodes)
selected_nodes.sort(cmp_circular_bias)
+ if not selected_nodes and scheduled_uninstalls:
+ selected_nodes = set()
+ for node in scheduled_uninstalls:
+ if not mygraph.child_nodes(node):
+ selected_nodes.add(node)
+ scheduled_uninstalls.difference_update(selected_nodes)
+
if not selected_nodes and not myblocker_uninstalls.is_empty():
# An Uninstall task needs to be executed in order to
# avoid conflict if possible.
-
min_parent_deps = None
uninst_task = None
for task in myblocker_uninstalls.leaf_nodes():
uninst_task = task
if uninst_task is not None:
- selected_nodes = [uninst_task]
+ # The uninstall is performed only after blocking
+ # packages have been merged on top of it. Files
+ # claimed by the blocking packages are detected and
+ # removed from the list of files to be uninstalled.
+ scheduled_uninstalls.add(uninst_task)
+ parent_nodes = mygraph.parent_nodes(uninst_task)
+
+ # Reverse the parent -> uninstall edges since we want
+ # to do the uninstall after blocking packages have
+ # been merged on top of it.
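+ # (The uninstall only becomes selectable again, above,
+ # once it has no remaining child nodes.)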
+ mygraph.remove(uninst_task)
+ for blocked_pkg in parent_nodes:
+ mygraph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
else:
# None of the Uninstall tasks are acceptable, so
# the corresponding blockers are unresolvable.
ignored_uninstall_tasks.add(node)
break
- # After dropping an Uninstall task, reset
- # the state variables for leaf node selection and
- # continue trying to select leaf nodes.
- prefer_asap = True
- accept_root_node = False
- continue
+ # After dropping an Uninstall task, reset
+ # the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ accept_root_node = False
+ continue
if not selected_nodes:
self._circular_deps_for_display = mygraph
verbosity = ("--quiet" in self.myopts and 1 or \
"--verbose" in self.myopts and 3 or 2)
favorites_set = InternalPackageSet(favorites)
+ oneshot = "--oneshot" in self.myopts or \
+ "--onlydeps" in self.myopts
changelogs=[]
p=[]
blockers = []
try:
pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
- if not pkg_world and myroot == self.target_root and \
+ if not (oneshot or pkg_world) and \
+ myroot == self.target_root and \
favorites_set.findAtomForPackage(pkg_key, metadata):
# Maybe it will be added to world now.
if create_world_atom(pkg_key, metadata,
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.pkgsettings = {}
- self.pkgsettings[self.target_root] = portage.config(clone=settings)
- if self.target_root != "/":
- self.pkgsettings["/"] = \
- portage.config(clone=trees["/"]["vartree"].settings)
+ self._blocker_db = {}
+ for root in trees:
+ self.pkgsettings[root] = portage.config(
+ clone=trees[root]["vartree"].settings)
+ self._blocker_db[root] = BlockerDB(
+ trees[root]["vartree"],
+ trees[root]["porttree"].dbapi)
self.curval = 0
self._spawned_pids = []
- self._uninstall_queue = []
+
+ def _find_blockers(self, new_pkg):
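+ """Return dblink instances for installed packages that
+ block new_pkg, or None if the current options mean that
+ nothing will really be merged. Packages sharing new_pkg's
+ slot or cpv are skipped since the merge replaces them
+ directly."""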
+ for opt in ("--buildpkgonly", "--nodeps",
+ "--fetchonly", "--fetch-all-uri", "--pretend"):
+ if opt in self.myopts:
+ return None
+
+ blocker_dblinks = []
+ for blocking_pkg in self._blocker_db[
+ new_pkg.root].findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocker_dblinks.append(portage.dblink(
+ blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+ self.pkgsettings[blocking_pkg.root], treetype="vartree",
+ vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+ return blocker_dblinks
def merge(self, mylist, favorites, mtimedb):
try:
pass
spawned_pids.remove(pid)
- def _dequeue_uninstall_tasks(self, mtimedb):
- if not self._uninstall_queue:
- return
- for uninst_task in self._uninstall_queue:
- root_config = self.trees[uninst_task.root]["root_config"]
- unmerge(root_config.settings, self.myopts,
- root_config.trees["vartree"], "unmerge",
- [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
- del mtimedb["resume"]["mergelist"][0]
- mtimedb.commit()
- del self._uninstall_queue[:]
-
def _merge(self, mylist, favorites, mtimedb):
failed_fetches = []
buildpkgonly = "--buildpkgonly" in self.myopts
metadata = pkg.metadata
if pkg.installed:
if not (buildpkgonly or fetchonly or pretend):
- self._uninstall_queue.append(pkg)
+ unmerge(root_config.settings, self.myopts,
+ root_config.trees["vartree"], "unmerge",
+ [pkg.cpv], mtimedb["ldpath"], clean_world=0)
+ del mtimedb["resume"]["mergelist"][0]
+ mtimedb.commit()
continue
if x[0]=="blocks":
bintree = self.trees[myroot]["bintree"]
if bintree.populated:
bintree.inject(pkg_key)
- self._dequeue_uninstall_tasks(mtimedb)
+
if "--buildpkgonly" not in self.myopts:
msg = " === (%s of %s) Merging (%s::%s)" % \
(mergecount, len(mymergelist), pkg_key, y)
"build-info"), myroot, pkgsettings,
myebuild=pkgsettings["EBUILD"],
mytree="porttree", mydbapi=portdb,
- vartree=vartree, prev_mtimes=ldpath_mtimes)
+ vartree=vartree, prev_mtimes=ldpath_mtimes,
+ blockers=self._find_blockers(pkg))
if retval != os.EX_OK:
return retval
elif "noclean" not in pkgsettings.features:
prev_mtimes=ldpath_mtimes)
if retval != os.EX_OK:
return retval
- self._dequeue_uninstall_tasks(mtimedb)
+
retval = portage.merge(pkgsettings["CATEGORY"],
pkgsettings["PF"], pkgsettings["D"],
os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
"build-info"), myroot, pkgsettings,
myebuild=pkgsettings["EBUILD"],
mytree="porttree", mydbapi=portdb,
- vartree=vartree, prev_mtimes=ldpath_mtimes)
+ vartree=vartree, prev_mtimes=ldpath_mtimes,
+ blockers=self._find_blockers(pkg))
if retval != os.EX_OK:
return retval
finally:
portage_locks.unlockdir(catdir_lock)
elif x[0]=="binary":
- self._dequeue_uninstall_tasks(mtimedb)
#merge the tbz2
mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
if "--getbinpkg" in self.myopts:
retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
mydbapi=bindb,
vartree=self.trees[myroot]["vartree"],
- prev_mtimes=ldpath_mtimes)
+ prev_mtimes=ldpath_mtimes,
+ blockers=self._find_blockers(pkg))
if retval != os.EX_OK:
return retval
#need to check for errors
fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
ask = "--ask" in myopts
nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
tree = "--tree" in myopts
if nodeps and tree:
tree = False
mergecount += 1
if mergecount==0:
- if "--noreplace" in myopts and favorites:
+ if "--noreplace" in myopts and not oneshot and favorites:
print
for x in favorites:
print " %s %s" % (good("*"), x)
return newmtime
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
- mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
if not os.access(myroot, os.W_OK):
writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
noiselevel=-1)
return errno.EACCES
mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
- vartree=vartree)
+ vartree=vartree, blockers=blockers)
return mylink.merge(pkgloc, infloc, myroot, myebuild,
mydbapi=mydbapi, prev_mtimes=prev_mtimes)
}
def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
- vartree=None):
+ vartree=None, blockers=None):
"""
Creates a DBlink object for a given CPV.
The given CPV may not be present in the database already.
global db
vartree = db[myroot]["vartree"]
self.vartree = vartree
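+ # Installed packages that conflict with this one via blocker
+ # atoms (as dblink instances), or None; used to resolve file
+ # collisions during merge().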
+ self._blockers = blockers
self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
self.dbcatdir = self.dbroot+"/"+cat
if os.path.exists(self.dbdir+"/CONTENTS"):
os.unlink(self.dbdir+"/CONTENTS")
+ def _clear_contents_cache(self):
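+ """Invalidate cached getcontents() results."""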
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+
def getcontents(self):
"""
Get the installed files of a given package (aka what that package installed)
"""
srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+ destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
if not os.path.isdir(srcroot):
writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
return 1
# check for package collisions
+ blockers = self._blockers
+ if blockers is None:
+ blockers = []
if True:
collision_ignore = set([normalize_path(myignore) for myignore in \
self.settings.get("COLLISION_IGNORE", "").split()])
if f[0] != "/":
f="/"+f
isowned = False
- for ver in [self] + others_in_slot:
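+ # A file already owned by a blocking package is not a
+ # collision; its ownership is transferred to this package
+ # after the merge (see below).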
+ for ver in [self] + others_in_slot + blockers:
if (ver.isowner(f, destroot) or ver.isprotected(f)):
isowned = True
break
self.dbdir = self.dbpkgdir
self.delete()
_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+
+ # Check for file collisions with blocking packages
+ # and remove any colliding files from their CONTENTS
+ # since they now belong to this package.
+ self._clear_contents_cache()
+ contents = self.getcontents()
+ destroot_len = len(destroot) - 1
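+ # destroot carries a trailing separator, so slicing at
+ # len(destroot) - 1 keeps the leading "/" on each CONTENTS key.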
+ for blocker in blockers:
+ blocker_contents = blocker.getcontents()
+ collisions = []
+ for filename in blocker_contents:
+ relative_filename = filename[destroot_len:]
+ if self.isowner(relative_filename, destroot):
+ collisions.append(filename)
+ if not collisions:
+ continue
+ for filename in collisions:
+ del blocker_contents[filename]
+ f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
+ try:
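+ # CONTENTS line formats, mirroring getcontents():
+ #   obj <path> <md5> <mtime>
+ #   sym <path> -> <target> <mtime>
+ #   dir|dev|fif <path>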
+ for filename in sorted(blocker_contents):
+ entry_data = blocker_contents[filename]
+ entry_type = entry_data[0]
+ relative_filename = filename[destroot_len:]
+ if entry_type == "obj":
+ entry_type, mtime, md5sum = entry_data
+ line = "%s %s %s %s\n" % \
+ (entry_type, relative_filename, md5sum, mtime)
+ elif entry_type == "sym":
+ entry_type, mtime, link = entry_data
+ line = "%s %s -> %s %s\n" % \
+ (entry_type, relative_filename, link, mtime)
+ else: # dir, dev, fif
+ line = "%s %s\n" % (entry_type, relative_filename)
+ f.write(line)
+ except:
+ # Abort the atomic write so that a partially
+ # written file never replaces the original CONTENTS.
+ f.abort()
+ raise
+ f.close()
+
# Due to mtime granularity, mtime checks do not always properly
# invalidate vardbapi caches.
self.vartree.dbapi.mtdircache.pop(self.cat, None)
"""Returns keys for all packages within pkgdir"""
return self.portdb.cp_list(self.cp, mytree=self.mytree)
-def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
+def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
+ vartree=None, prev_mtimes=None, blockers=None):
"""will merge a .tbz2 file, returning a list of runtime dependencies
that must be satisfied, or None if there was a merge error. This
code assumes the package exists."""
#tbz2_lock = None
mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
- treetype="bintree")
+ treetype="bintree", blockers=blockers)
retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
mydbapi=mydbapi, prev_mtimes=prev_mtimes)
did_merge_phase = True