return mynewlines
-def clean_world(vardb, cpv):
+def world_clean_package(vardb, cpv):
"""Remove a package from the world file when unmerged."""
world_set = WorldSet(vardb.settings)
world_set.lock()
return "medium-soft"
return "soft"
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ()
"""
Combination of properties Priority Category
shown_comments.add(comment)
return have_eapi_mask
-class Package(object):
- __slots__ = ("__weakref__", "built", "cpv", "depth",
- "installed", "metadata", "root", "onlydeps", "type_name",
- "cp", "cpv_slot", "slot_atom", "_digraph_node")
+class Task(object):
+ __slots__ = ("__weakref__", "_hash_key",)
+
def __init__(self, **kwargs):
for myattr in self.__slots__:
if myattr == "__weakref__":
myvalue = kwargs.get(myattr, None)
setattr(self, myattr, myvalue)
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ raise NotImplementedError(self)
+
+ def __eq__(self, other):
+ return self._get_hash_key() == other
+
+ def __ne__(self, other):
+ return self._get_hash_key() != other
+
+ def __hash__(self):
+ return hash(self._get_hash_key())
+
+ def __len__(self):
+ return len(self._get_hash_key())
+
+ def __getitem__(self, key):
+ return self._get_hash_key()[key]
+
+ def __iter__(self):
+ return iter(self._get_hash_key())
+
+ def __contains__(self, key):
+ return key in self._get_hash_key()
+
+ def __str__(self):
+ return str(self._get_hash_key())
+
+class Package(Task):
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "metadata", "root", "onlydeps", "type_name",
+ "cp", "cpv_slot", "slot_atom")
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
self.cp = portage.cpv_getkey(self.cpv)
self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
- status = "merge"
- if self.onlydeps or self.installed:
- status = "nomerge"
- self._digraph_node = (self.type_name, self.root, self.cpv, status)
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ operation = "merge"
+ if self.onlydeps or self.installed:
+ operation = "nomerge"
+ self._hash_key = \
+ (self.type_name, self.root, self.cpv, operation)
+ return self._hash_key
def __lt__(self, other):
other_split = portage.catpkgsplit(other.cpv)
return True
return False
- def __eq__(self, other):
- return self._digraph_node == other
- def __ne__(self, other):
- return self._digraph_node != other
- def __hash__(self):
- return hash(self._digraph_node)
- def __len__(self):
- return len(self._digraph_node)
- def __getitem__(self, key):
- return self._digraph_node[key]
- def __iter__(self):
- return iter(self._digraph_node)
- def __contains__(self, key):
- return key in self._digraph_node
- def __str__(self):
- return str(self._digraph_node)
+class Uninstall(Package):
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ self._hash_key = \
+ (self.type_name, self.root, self.cpv, "uninstall")
+ return self._hash_key
class DependencyArg(object):
def __init__(self, arg=None, root_config=None):
self._cp_map = {}
self._cpv_map = {}
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
def _clear_cache(self):
if self._categories is not None:
self._categories = None
# require that the old version be uninstalled at build
# time.
continue
- if parent_static and \
- slot_atom not in modified_slots[myroot]:
- # This blocker will be handled the next time that a
- # merge of either package is triggered.
+ if parent.installed:
+ # Two currently installed packages conflict with
+ # each other. Ignore this case since the damage
+ # is already done and this would be likely to
+ # confuse users if displayed like a normal blocker.
+ continue
+ if pstatus == "merge":
+ # Maybe the blocked package can be replaced or simply
+ # unmerged to resolve this block.
+ inst_pkg = self._pkg_cache[
+ ("installed", myroot, cpv, "nomerge")]
+ depends_on_order.add((inst_pkg, parent))
continue
- if pstatus == "merge" and \
- slot_atom in modified_slots[myroot]:
- replacement = self._slot_pkg_map[myroot][slot_atom]
- if not portage.match_from_list(
- mydep, [replacement.cpv_slot]):
- # Apparently a replacement may be able to
- # invalidate this block.
- depends_on_order.add((replacement, parent))
- continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
# This blocker will be handled the next time that a
# merge of either package is triggered.
continue
- if not parent_static and pstatus == "nomerge" and \
- slot_atom in modified_slots[myroot]:
- replacement = self._slot_pkg_map[myroot][pslot_atom]
- if replacement not in \
- self.blocker_parents[blocker]:
- # Apparently a replacement may be able to
- # invalidate this block.
- blocked_node = \
- self._slot_pkg_map[myroot][slot_atom]
- depends_on_order.add(
- (replacement, blocked_node))
- continue
+
+ # Maybe the blocking package can be
+ # unmerged to resolve this block.
+ try:
+ blocked_pkg = self._slot_pkg_map[myroot][slot_atom]
+ except KeyError:
+ blocked_pkg = self._pkg_cache[
+ ("installed", myroot, cpv, "nomerge")]
+ if pstatus == "merge" and blocked_pkg.installed:
+ depends_on_order.add((blocked_pkg, parent))
+ continue
+ elif pstatus == "nomerge":
+ depends_on_order.add((parent, blocked_pkg))
+ continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
+
+ # Make sure we don't unmerge any packages that have been
+ # into the graph.
if not unresolved_blocks and depends_on_order:
- for node, pnode in depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ if self.digraph.contains(inst_pkg) and \
+ self.digraph.parent_nodes(inst_pkg):
+ unresolved_blocks = True
+ break
+
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ uninst_task = Uninstall(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg.metadata, root=inst_pkg.root,
+ type_name=inst_pkg.type_name)
+ self._pkg_cache[uninst_task] = uninst_task
# Enforce correct merge order with a hard dep.
- self.digraph.addnode(node, pnode,
- priority=DepPriority(buildtime=True))
+ self.digraph.addnode(uninst_task, inst_task,
+ priority=BlockerDepPriority())
# Count references to this blocker so that it can be
# invalidated after nodes referencing it have been
# merged.
- self.blocker_digraph.addnode(node, blocker)
+ self.blocker_digraph.addnode(uninst_task, blocker)
if not unresolved_blocks and not depends_on_order:
self.blocker_parents[blocker].remove(parent)
if unresolved_blocks:
return -1
myblockers = self.blocker_digraph.copy()
retlist=[]
- circular_blocks = False
+ # Contains any Uninstall tasks that have been ignored
+ # in order to avoid the circular deps code path. These
+ # correspond to blocker conflicts that could not be
+ # resolved.
+ ignored_uninstall_tasks = set()
blocker_deps = None
asap_nodes = []
portage_node = None
- if reversed:
- get_nodes = mygraph.root_nodes
- else:
- get_nodes = mygraph.leaf_nodes
+ def get_nodes(**kwargs):
+ """
+ Returns leaf nodes excluding Uninstall instances
+ since those should be executed as late as possible.
+ """
+ return [node for node in mygraph.leaf_nodes(**kwargs) \
+ if not isinstance(node, Uninstall)]
+ if True:
for node in mygraph.order:
if node.root == "/" and \
"sys-apps/portage" == portage.cpv_getkey(node.cpv):
selected_nodes = list(selected_nodes)
selected_nodes.sort(cmp_circular_bias)
- if not selected_nodes:
- if not myblockers.is_empty():
- """A blocker couldn't be circumnavigated while keeping all
- dependencies satisfied. The user will have to resolve this
- manually. This is a panic condition and thus the order
- doesn't really matter, so just pop a random node in order
- to avoid a circular dependency panic if possible."""
- if not circular_blocks:
- circular_blocks = True
- blocker_deps = myblockers.leaf_nodes()
- while blocker_deps:
- # Some of these nodes might have already been selected
- # by the normal node selection process after the
- # circular_blocks flag has been set. Therefore, we
- # have to verify that they're still in the graph so
- # that they're not selected more than once.
- node = blocker_deps.pop()
- if mygraph.contains(node):
- selected_nodes = [node]
+ if not selected_nodes and not myblockers.is_empty():
+ # An Uninstall task needs to be executed in order to
+ # avoid conflict if possible.
+
+ min_parent_deps = None
+ uninst_task = None
+ for task in myblockers.leaf_nodes():
+ # Do some sanity checks so that system or world packages
+ # don't get uninstalled inappropriately here (only really
+ # necessary when --complete-graph has not been enabled).
+
+ if task in ignored_uninstall_tasks:
+ continue
+
+ root_config = self.roots[task.root]
+ inst_pkg = self._pkg_cache[
+ ("installed", task.root, task.cpv, "nomerge")]
+
+ # For packages in the system set, don't take
+ # any chances. If the conflict can't be resolved
+ # by a normal upgrade operation then require
+ # user intervention.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "system"].iterAtomsForPackage(task):
+ skip = True
break
+ except portage_exception.InvalidDependString:
+ skip = True
+ if skip:
+ continue
+
+ # For packages in the world set, go ahead and uninstall
+ # when necessary, as long as the atom will be satisfied
+ # in the final state.
+ graph_db = self.mydbapi[task.root]
+ try:
+ for atom in root_config.sets[
+ "world"].iterAtomsForPackage(task):
+ satisfied = False
+ for cpv in graph_db.match(atom):
+ if cpv == inst_pkg.cpv and \
+ inst_pkg in graph_db:
+ continue
+ satisfied = True
+ break
+ if not satisfied:
+ skip = True
+ break
+ except portage_exception.InvalidDependString:
+ skip = True
+ if skip:
+ continue
+
+ # Check the deps of parent nodes to ensure that
+ # the chosen task produces a leaf node. Maybe
+ # this can be optimized some more to make the
+ # best possible choice, but the current algorithm
+ # is simple and should be near optimal for most
+ # common cases.
+ parent_deps = set()
+ for parent in mygraph.parent_nodes(task):
+ parent_deps.update(mygraph.child_nodes(parent,
+ ignore_priority=DepPriority.MEDIUM_SOFT))
+ parent_deps.remove(task)
+ if min_parent_deps is None or \
+ len(parent_deps) < min_parent_deps:
+ min_parent_deps = len(parent_deps)
+ uninst_task = task
+
+ if uninst_task is not None:
+ selected_nodes = [uninst_task]
+ else:
+ # None of the Uninstall tasks are acceptable, so
+ # the corresponding blockers are unresolvable.
+ # We need to drop an Uninstall task here in order
+ # to avoid the circular deps code path, but the
+ # blocker will still be counted as an unresolved
+ # conflict.
+ for node in myblockers.leaf_nodes():
+ try:
+ mygraph.remove(node)
+ except KeyError:
+ pass
+ else:
+ ignored_uninstall_tasks.add(node)
+ break
+
+ # After dropping an Uninstall task, reset
+ # the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ accept_root_node = False
+ continue
if not selected_nodes:
# No leaf nodes are available, so we have a circular
accept_root_node = False
for node in selected_nodes:
+
+ # Handle interactions between blockers
+ # and uninstallation tasks.
+ uninst_task = None
+ if isinstance(node, Uninstall):
+ uninst_task = node
+ else:
+ vardb = self.trees[node.root]["vartree"].dbapi
+ previous_cpv = vardb.match(node.slot_atom)
+ if previous_cpv:
+ # The package will be replaced by this one, so remove
+ # the corresponding Uninstall task if necessary.
+ previous_cpv = previous_cpv[0]
+ uninst_task = \
+ ("installed", node.root, previous_cpv, "uninstall")
+ try:
+ mygraph.remove(uninst_task)
+ except KeyError:
+ pass
+ if uninst_task is not None and \
+ uninst_task not in ignored_uninstall_tasks and \
+ myblockers.contains(uninst_task):
+ myblockers.remove(uninst_task)
+ for blocker in myblockers.root_nodes():
+ if myblockers.child_nodes(blocker):
+ continue
+ myblockers.remove(blocker)
+ unresolved = \
+ self._unresolved_blocker_parents.get(blocker)
+ if unresolved:
+ self.blocker_parents[blocker] = unresolved
+ else:
+ del self.blocker_parents[blocker]
+
if node[-1] != "nomerge":
retlist.append(list(node))
mygraph.remove(node)
- if not reversed and not circular_blocks and myblockers.contains(node):
- """This node may have invalidated one or more blockers."""
- myblockers.remove(node)
- for blocker in myblockers.root_nodes():
- if not myblockers.child_nodes(blocker):
- myblockers.remove(blocker)
- unresolved = \
- self._unresolved_blocker_parents.get(blocker)
- if unresolved:
- self.blocker_parents[blocker] = unresolved
- else:
- del self.blocker_parents[blocker]
if not reversed:
"""Blocker validation does not work with reverse mode,
pkg = self._pkg_cache[tuple(x)]
metadata = pkg.metadata
pkg_status = x[3]
- pkg_merge = ordered and pkg_status != "nomerge"
- if pkg in self._slot_collision_nodes or pkg.onlydeps:
+ pkg_merge = ordered and pkg_status == "merge"
+ if not pkg_merge and pkg_status == "merge":
+ pkg_status = "nomerge"
+ if pkg_status == "uninstall":
+ mydbapi = vardb
+ elif pkg in self._slot_collision_nodes or pkg.onlydeps:
# The metadata isn't cached due to a slot collision or
# --onlydeps.
mydbapi = self.trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]),
uselist=pkg_use))
except portage_exception.InvalidDependString, e:
- if pkg_status != "nomerge":
+ if not pkg.installed:
restrict = mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]
show_invalid_depstring_notice(x, restrict, str(e))
del e
installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
if vardb.cpv_exists(pkg_key):
addl=" "+yellow("R")+fetch+" "
- if x[3] != "nomerge":
- if ordered:
- counters.reinst += 1
+ if pkg_merge:
+ counters.reinst += 1
+ elif pkg_status == "uninstall":
+ counters.uninst += 1
# filter out old-style virtual matches
elif installed_versions and \
portage.cpv_getkey(installed_versions[0]) == \
# now use the data to generate output
repoadd = None
- if pkg_status == "nomerge" or not has_previous:
+ if pkg.installed or not has_previous:
repoadd = repo_display.repoStr(repo_path_real)
else:
repo_path_prev = None
return colorize("PKG_MERGE_WORLD", pkg_str)
else:
return colorize("PKG_MERGE", pkg_str)
+ elif pkg_status == "uninstall":
+ return colorize("PKG_UNINSTALL", pkg_str)
else:
if pkg_system:
return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
myprint=myprint+myoldbest
myprint=myprint+darkgreen("to "+x[1])
else:
- myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
+ if not pkg_merge:
+ myprint = "[%s] %s%s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cp))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (pkgprint(pkg.type_name), addl,
+ indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
else:
if not pkg_merge:
- myprint = "[%s ] " % pkgprint("nomerge")
+ myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
else:
myprint = "[" + pkg_type + " " + addl + "] "
myprint += indent + pkgprint(pkg_key) + " " + \
myprint=myprint+" "+green(xs[1]+xs[2])+" "
myprint=myprint+myoldbest
else:
- myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
+ if not pkg_merge:
+ myprint = "[%s] %s%s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cp))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (pkgprint(pkg.type_name), addl,
+ indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
myprint=myprint+myoldbest+" "+verboseadd
else:
if not pkg_merge:
- myprint="["+pkgprint("nomerge")+" ] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
+ myprint = "[%s] %s%s %s %s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cpv),
+ myoldbest, verboseadd)
else:
myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
p.append(myprint)
pkg_type, myroot, pkg_key, action = x
if pkg_type not in self.pkg_tree_map:
continue
- if action != "merge":
+ if action not in ("merge", "uninstall"):
continue
mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
try:
mydb.aux_get(pkg_key, self._mydbapi_keys)))
except KeyError:
# It does no exist or it is corrupt.
+ if action == "uninstall":
+ continue
raise portage_exception.PackageNotFound(pkg_key)
if pkg_type == "ebuild":
pkgsettings = self.pkgsettings[myroot]
pkgsettings.setcpv(pkg_key, mydb=metadata)
metadata["USE"] = pkgsettings["PORTAGE_USE"]
- installed = False
+ installed = action == "uninstall"
built = pkg_type != "ebuild"
- pkg = Package(built=built, cpv=pkg_key, installed=installed,
- metadata=metadata, root=myroot, type_name=pkg_type)
+ if installed:
+ pkg_constructor = Uninstall
+ else:
+ pkg_constructor = Package
+ pkg = pkg_constructor(built=built, cpv=pkg_key,
+ installed=installed, metadata=metadata,
+ root=myroot, type_name=pkg_type)
self._pkg_cache[pkg] = pkg
fakedb[myroot].cpv_inject(pkg)
self.spinner.update()
self.new = 0
self.newslot = 0
self.reinst = 0
+ self.uninst = 0
self.blocks = 0
self.totalsize = 0
self.restrict_fetch = 0
details.append("%s reinstall" % self.reinst)
if self.reinst > 1:
details[-1] += "s"
+ if self.uninst > 0:
+ details.append("%s uninstall" % self.uninst)
+ if self.uninst > 1:
+ details[-1] += "s"
if self.blocks > 0:
details.append("%s block" % self.blocks)
if self.blocks > 1:
portage.config(clone=trees["/"]["vartree"].settings)
self.curval = 0
self._spawned_pids = []
+ self._uninstall_queue = []
def merge(self, mylist, favorites, mtimedb):
try:
pass
spawned_pids.remove(pid)
+ def _dequeue_uninstall_tasks(self, mtimedb):
+ if not self._uninstall_queue:
+ return
+ for uninst_task in self._uninstall_queue:
+ root_config = self.trees[uninst_task.root]["root_config"]
+ unmerge(root_config.settings, self.myopts,
+ root_config.trees["vartree"], "unmerge",
+ [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
+ del mtimedb["resume"]["mergelist"][0]
+ mtimedb.commit()
+ del self._uninstall_queue[:]
+
def _merge(self, mylist, favorites, mtimedb):
failed_fetches = []
+ buildpkgonly = "--buildpkgonly" in self.myopts
fetchonly = "--fetchonly" in self.myopts or \
"--fetch-all-uri" in self.myopts
pretend = "--pretend" in self.myopts
metadata_keys = [k for k in portage.auxdbkeys \
if not k.startswith("UNUSED_")] + ["USE"]
+ task_list = mymergelist
+ # Filter mymergelist so that all the len(mymergelist) calls
+ # below (for display) do not count Uninstall instances.
+ mymergelist = [x for x in mymergelist if x[-1] == "merge"]
mergecount=0
- for x in mymergelist:
+ for x in task_list:
pkg_type = x[0]
if pkg_type == "blocks":
continue
- mergecount+=1
myroot=x[1]
pkg_key = x[2]
pkgindex=2
portdb = self.trees[myroot]["porttree"].dbapi
bindb = self.trees[myroot]["bintree"].dbapi
vartree = self.trees[myroot]["vartree"]
+ vardb = vartree.dbapi
+ root_config = self.trees[myroot]["root_config"]
pkgsettings = self.pkgsettings[myroot]
metadata = {}
if pkg_type == "blocks":
else:
if pkg_type == "binary":
mydbapi = bindb
+ elif pkg_type == "installed":
+ mydbapi = vardb
else:
raise AssertionError("Package type: '%s'" % pkg_type)
metadata.update(izip(metadata_keys,
mydbapi.aux_get(pkg_key, metadata_keys)))
built = pkg_type != "ebuild"
installed = pkg_type == "installed"
- pkg = Package(type_name=pkg_type, root=myroot,
+ if installed:
+ pkg_constructor = Uninstall
+ else:
+ pkg_constructor = Package
+ mergecount += 1
+ pkg = pkg_constructor(type_name=pkg_type, root=myroot,
cpv=pkg_key, built=built, installed=installed,
metadata=metadata)
+ if pkg.installed:
+ if not (buildpkgonly or fetchonly or pretend):
+ self._uninstall_queue.append(pkg)
+ continue
+
if x[0]=="blocks":
pkgindex=3
y = portdb.findname(pkg_key)
bintree = self.trees[myroot]["bintree"]
if bintree.populated:
bintree.inject(pkg_key)
+ self._dequeue_uninstall_tasks(mtimedb)
if "--buildpkgonly" not in self.myopts:
msg = " === (%s of %s) Merging (%s::%s)" % \
(mergecount, len(mymergelist), pkg_key, y)
short_msg = "emerge: (%s of %s) %s Compile" % \
(mergecount, len(mymergelist), pkg_key)
emergelog(xterm_titles, msg, short_msg=short_msg)
- retval = portage.doebuild(y, "merge", myroot,
+ retval = portage.doebuild(y, "install", myroot,
pkgsettings, self.edebug, vartree=vartree,
mydbapi=portdb, tree="porttree",
prev_mtimes=ldpath_mtimes)
if retval != os.EX_OK:
return retval
+ self._dequeue_uninstall_tasks(mtimedb)
+ retval = portage.merge(pkgsettings["CATEGORY"],
+ pkgsettings["PF"], pkgsettings["D"],
+ os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, pkgsettings,
+ myebuild=pkgsettings["EBUILD"],
+ mytree="porttree", mydbapi=portdb,
+ vartree=vartree, prev_mtimes=ldpath_mtimes)
+ if retval != os.EX_OK:
+ return retval
finally:
if builddir_lock:
portage_locks.unlockdir(builddir_lock)
portage_locks.unlockdir(catdir_lock)
elif x[0]=="binary":
+ self._dequeue_uninstall_tasks(mtimedb)
#merge the tbz2
mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
if "--getbinpkg" in self.myopts:
return os.EX_OK
def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files,
- ldpath_mtimes, autoclean=0):
+ ldpath_mtimes, autoclean=0, clean_world=1):
candidate_catpkgs=[]
global_unmerge=0
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
sys.exit(retval)
else:
- clean_world(vartree.dbapi, y)
+ if clean_world:
+ world_clean_package(vartree.dbapi, y)
emergelog(xterm_titles, " >>> unmerge success: "+y)
return 1