import portage
del os.environ["PORTAGE_LEGACY_GLOBALS"]
from portage import digraph, portdbapi
-from portage.const import NEWS_LIB_PATH
+from portage.const import NEWS_LIB_PATH, CACHE_PATH, PRIVATE_PATH
import emerge.help
import portage.xpak, commands, errno, re, socket, time, types
import portage.exception
from portage.data import secpass
from portage.util import normalize_path as normpath
+from portage.util import writemsg
if not hasattr(__builtins__, "set"):
from sets import Set as set
print "Interrupted."
sys.exit(1)
-def sorted_versions(verlist):
- ret = []
- for ver in verlist:
- verparts = ver.split("-")
- if len(verparts) == 2:
- verrev = int(verparts[1][1:])
- else:
- verrev = 0
- x = 0
- while x < len(ret):
- retparts = ret[x].split("-")
- verdiff = portage.vercmp(retparts[0], verparts[0])
- if verdiff > 0:
- break
- elif verdiff == 0:
- if len(retparts) == 2:
- retrev = int(retparts[1][1:])
- else:
- retrev = 0
- if retrev >= verrev:
- break
- x += 1
- ret.insert(x, ver)
- return ret
-
-
actions=[
"clean", "config", "depclean",
"info", "metadata",
if xterm_titles:
if short_msg == None:
short_msg = mystr
- if "HOSTNAME" in os.environ.keys():
+ if "HOSTNAME" in os.environ:
short_msg = os.environ["HOSTNAME"]+": "+short_msg
xtermTitle(short_msg)
try:
return myparams
# search functionality
-class search:
+class search(object):
#
# class constants
print "Searching... ",
regexsearch = False
- if self.searchkey[0] == '%':
+ if self.searchkey.startswith('%'):
regexsearch = True
self.searchkey = self.searchkey[1:]
- if self.searchkey[0] == '@':
+ if self.searchkey.startswith('@'):
match_category = 1
self.searchkey = self.searchkey[1:]
if regexsearch:
if self.searchre.search(full_desc):
self.matches["desc"].append([full_package,masked])
self.mlen=0
- for mtype in self.matches.keys():
+ for mtype in self.matches:
self.matches[mtype].sort()
self.mlen += len(self.matches[mtype])
print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
print "[ Applications found : "+white(str(self.mlen))+" ]"
print " "
- for mtype in self.matches.keys():
+ for mtype in self.matches:
for match,masked in self.matches[mtype]:
if mtype=="pkg":
catpack=match
else:
#this doesn't match the package we're unmerging; keep it.
newworldlist.append(x)
-
+
+ newworldlist.sort()
+
portage.util.ensure_dirs(os.path.join(vardb.root, portage.PRIVATE_PATH),
gid=portage.portage_gid, mode=02770)
- portage.util.write_atomic(world_filename, "\n".join(newworldlist))
+ portage.util.write_atomic(world_filename, "\n".join(newworldlist)+"\n")
def genericdict(mylist):
mynewdict={}
def filter_iuse_defaults(iuse):
+	# Generator: yield each IUSE flag with any leading default marker removed.
	for flag in iuse:
-		if flag.startswith("+"):
+		if flag.startswith("+") or flag.startswith("-"):
+			# Strip the IUSE-default prefix ("+" = enabled, "-" = disabled).
			yield flag[1:]
		else:
			yield flag
f.add_flowing_data(x)
f.end_paragraph(1)
-class depgraph:
+class depgraph(object):
pkg_tree_map = {
"ebuild":"porttree",
self.args_keys = []
self.blocker_digraph = digraph()
self.blocker_parents = {}
+ self._unresolved_blocker_parents = {}
self._slot_collision_info = []
# Slot collision nodes are not allowed to block other packages since
# blocker validation is only able to account for one package per slot.
self.blocker_parents.setdefault(
tuple(mybigkey), set()).add(myparent)
return 1
- if not arg and myroot == self.target_root:
- arg = portage.best_match_to_list(mykey, self.args_keys)
+
# select the correct /var database that we'll be checking against
vardbapi = self.trees[myroot]["vartree"].dbapi
portdb = self.trees[myroot]["porttree"].dbapi
mydbapi = self.trees[myroot][self.pkg_tree_map[mytype]].dbapi
+ if not arg and myroot == self.target_root:
+ cpv_slot = "%s:%s" % (mykey, mydbapi.aux_get(mykey, ["SLOT"])[0])
+ arg = portage.best_match_to_list(cpv_slot, self.args_keys)
+
if myuse is None:
self.pkgsettings[myroot].setcpv(mykey, mydb=portdb)
myuse = self.pkgsettings[myroot]["USE"].split()
if addme:
metadata = dict(izip(self._mydbapi_keys,
mydbapi.aux_get(mykey, self._mydbapi_keys)))
- if merging == 0 and vardbapi.cpv_exists(mykey):
- metadata["USE"] = vardbapi.aux_get(mykey, ["USE"])[0]
- myuse = metadata["USE"].split()
- metadata["SLOT"] = vardbapi.aux_get(mykey, ["SLOT"])[0]
+ if merging == 0 and vardbapi.cpv_exists(mykey) and \
+ mytype != "installed":
+ mybigkey[0] = "installed"
+ mydbapi = vardbapi
+ jbigkey = tuple(mybigkey)
+ metadata = dict(izip(self._mydbapi_keys,
+ mydbapi.aux_get(mykey, self._mydbapi_keys)))
+ myuse = mydbapi.aux_get(mykey, ["USE"])[0].split()
slot_atom = "%s:%s" % (portage.dep_getkey(mykey), metadata["SLOT"])
existing_node = self._slot_node_map[myroot].get(
slot_atom, None)
if p_status == "merge":
# Update old-style virtuals if this package provides any.
# These are needed for dep_virtual calls inside dep_check.
- self.pkgsettings[p_root].setinst(p_key,
- self.trees[p_root][self.pkg_tree_map[p_type]].dbapi)
+ p_db = self.trees[p_root][self.pkg_tree_map[p_type]].dbapi
+ try:
+ self.pkgsettings[p_root].setinst(p_key, p_db)
+ except portage.exception.InvalidDependString, e:
+ provide = p_db.aux_get(p_key, ["PROVIDE"])[0]
+ show_invalid_depstring_notice(myparent, provide, str(e))
+ del e
+ return 0
if "--debug" in self.myopts:
print "Candidates:",mymerge
print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
print "!!! One of the following masked packages is required to complete your request:"
oldcomment = ""
+ shown_licenses = []
for p in alleb:
mreasons = portage.getmaskingstatus(p,
settings=pkgsettings, portdb=portdb)
print "- "+p+" (masked by: "+", ".join(mreasons)+")"
- comment, filename = portage.getmaskingreason(p,
- settings=pkgsettings, portdb=portdb, return_location=True)
- if comment and comment != oldcomment:
- print filename+":"
- print comment
- oldcomment = comment
+ if "package.mask" in mreasons:
+ comment, filename = \
+ portage.getmaskingreason(p,
+ settings=pkgsettings, portdb=portdb,
+ return_location=True)
+ if comment and comment != oldcomment:
+ print filename+":"
+ print comment
+ oldcomment = comment
+ try:
+ licenses = portdb.aux_get(p, ["LICENSE"])[0]
+ except KeyError:
+ # Corruption will have been reported above.
+ continue
+ uselist = []
+ if "?" in licenses:
+ pkgsettings.setcpv(p, mydb=portdb)
+ uselist = pkgsettings.get("USE", "").split()
+ missing_licenses = []
+ try:
+ missing_licenses = \
+ pkgsettings.getMissingLicenses(
+ licenses, p, uselist)
+ except portage.exception.InvalidDependString:
+ # This will have already been reported
+ # above via mreasons.
+ pass
+ for l in missing_licenses:
+ l_path = portdb.findLicensePath(l)
+ if l in shown_licenses:
+ continue
+ msg = ("A copy of the '%s' license" + \
+ " is located at '%s'.") % (l, l_path)
+ print msg
+ print
+ shown_licenses.append(l)
print
print "For more information, see MASKED PACKAGES section in the emerge man page or "
print "refer to the Gentoo Handbook."
blocked_slots_final[cpv] = \
"%s:%s" % (portage.dep_getkey(cpv),
final_db.aux_get(cpv, ["SLOT"])[0])
- blocked_slots_final_values = set(blocked_slots_final.itervalues())
for parent in list(self.blocker_parents[blocker]):
ptype, proot, pcpv, pstatus = parent
pdbapi = self.trees[proot][self.pkg_tree_map[ptype]].dbapi
for cpv in blocked_initial:
slot_atom = blocked_slots_initial[cpv]
if slot_atom == pslot_atom:
- # The parent blocks an initial package in the same
- # slot as itself. The merge/nomerge status of neither
- # node matters. In any case, this particular block is
- # automatically resolved.
+ # TODO: Support blocks within slots in cases where it
+ # might make sense. For example, a new version might
+ # require that the old version be uninstalled at build
+ # time.
continue
if parent_static and \
slot_atom not in modified_slots[myroot]:
# merge of either package is triggered.
continue
if pstatus == "merge" and \
- slot_atom not in blocked_slots_final_values:
- upgrade_matches = final_db.match(slot_atom)
- if upgrade_matches:
- # Apparently an upgrade may be able to invalidate
- # this block.
- upgrade_node = \
- self.pkg_node_map[proot][upgrade_matches[0]]
- depends_on_order.add(upgrade_node)
- continue
+ slot_atom in modified_slots[myroot]:
+ replacement = final_db.match(slot_atom)
+ if replacement:
+ if not portage.match_from_list(mydep, replacement):
+ # Apparently a replacement may be able to
+ # invalidate this block.
+ replacement_node = \
+ self.pkg_node_map[proot][replacement[0]]
+ depends_on_order.add((replacement_node, parent))
+ continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
for cpv in blocked_final:
slot_atom = blocked_slots_final[cpv]
if slot_atom == pslot_atom:
- # The parent blocks itself, so the merge order does not
- # need to be enforced.
+ # TODO: Support blocks within slots.
continue
if parent_static and \
slot_atom not in modified_slots[myroot]:
# This blocker will be handled the next time that a
# merge of either package is triggered.
continue
+ if not parent_static and pstatus == "nomerge" and \
+ slot_atom in modified_slots[myroot]:
+ replacement = final_db.match(pslot_atom)
+ if replacement:
+ replacement_node = \
+ self.pkg_node_map[proot][replacement[0]]
+ if replacement_node not in \
+ self.blocker_parents[blocker]:
+ # Apparently a replacement may be able to
+ # invalidate this block.
+ blocked_node = self.pkg_node_map[proot][cpv]
+ depends_on_order.add(
+ (replacement_node, blocked_node))
+ continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
if not unresolved_blocks and depends_on_order:
- for node in depends_on_order:
+ for node, pnode in depends_on_order:
# Enforce correct merge order with a hard dep.
- self.digraph.addnode(node, parent,
+ self.digraph.addnode(node, pnode,
priority=DepPriority(buildtime=True))
# Count references to this blocker so that it can be
# invalidated after nodes referencing it have been
self.blocker_digraph.addnode(node, blocker)
if not unresolved_blocks and not depends_on_order:
self.blocker_parents[blocker].remove(parent)
+ if unresolved_blocks:
+ self._unresolved_blocker_parents.setdefault(
+ blocker, set()).add(parent)
if not self.blocker_parents[blocker]:
del self.blocker_parents[blocker]
# Validate blockers that depend on merge order.
if not circular_blocks:
circular_blocks = True
blocker_deps = myblockers.leaf_nodes()
- if blocker_deps:
- selected_nodes = [blocker_deps.pop()]
+ while blocker_deps:
+ # Some of these nodes might have already been selected
+ # by the normal node selection process after the
+ # circular_blocks flag has been set. Therefore, we
+ # have to verify that they're still in the graph so
+ # that they're not selected more than once.
+ node = blocker_deps.pop()
+ if mygraph.contains(node):
+ selected_nodes = [node]
+ break
if not selected_nodes:
# No leaf nodes are available, so we have a circular
for blocker in myblockers.root_nodes():
if not myblockers.child_nodes(blocker):
myblockers.remove(blocker)
- del self.blocker_parents[blocker]
+ unresolved = \
+ self._unresolved_blocker_parents.get(blocker)
+ if unresolved:
+ self.blocker_parents[blocker] = unresolved
+ else:
+ del self.blocker_parents[blocker]
if not reversed:
"""Blocker validation does not work with reverse mode,
else:
#world mode
worldlist = getlist(self.settings, "world")
- sysdict = genericdict(getlist(self.settings, "system"))
+ mylist = getlist(self.settings, "system")
worlddict=genericdict(worldlist)
- for x in worlddict.keys():
+ for x in worlddict:
if not portage.isvalidatom(x):
world_problems = True
- elif not self.trees[self.target_root]["vartree"].dbapi.match(x):
+ continue
+ elif not vardb.match(x):
world_problems = True
- else:
- sysdict[x]=worlddict[x]
-
- mylist = sysdict.keys()
+ available = False
+ if "--usepkgonly" not in self.myopts and \
+ portdb.match(x):
+ available = True
+ elif "--usepkg" in self.myopts:
+ mymatches = bindb.match(x)
+ if "--usepkgonly" not in self.myopts:
+ mymatches = visible(mymatches)
+ if mymatches:
+ available = True
+ if not available:
+ continue
+ mylist.append(x)
newlist = []
for atom in mylist:
"--verbose" in self.myopts and 3 or 2)
changelogs=[]
p=[]
+ blockers = []
counters = PackageCounters()
for t in self.settings["PORTDIR_OVERLAY"].split()]
tree_nodes = []
- node_depth = {}
+ display_list = []
+ mygraph = self._parent_child_digraph
i = 0
depth = 0
+ shown_edges = set()
for x in mylist:
if "blocks" == x[0]:
+ display_list.append((x, 0, True))
continue
graph_key = tuple(x)
if "--tree" in self.myopts:
depth = len(tree_nodes)
while depth and graph_key not in \
- self.digraph.child_nodes(tree_nodes[depth-1]):
+ mygraph.child_nodes(tree_nodes[depth-1]):
depth -= 1
- tree_nodes = tree_nodes[:depth]
- tree_nodes.append(graph_key)
- node_depth[graph_key] = depth
+ if depth:
+ tree_nodes = tree_nodes[:depth]
+ tree_nodes.append(graph_key)
+ display_list.append((x, depth, True))
+ shown_edges.add((graph_key, tree_nodes[depth-1]))
+ else:
+ traversed_nodes = set() # prevent endless circles
+ traversed_nodes.add(graph_key)
+			def add_parents(current_node, ordered):
+				# Walk up the dependency graph from current_node, emitting each
+				# selected ancestor into display_list (depth = len(tree_nodes))
+				# so --tree output can show why an unordered node is present.
+				# traversed_nodes and shown_edges prevent endless cycles and
+				# duplicate edges from being displayed.
+				parent_nodes = mygraph.parent_nodes(current_node)
+				if parent_nodes:
+					child_nodes = set(mygraph.child_nodes(current_node))
+					selected_parent = None
+					# First, try to avoid a direct cycle.
+					for node in parent_nodes:
+						if node not in traversed_nodes and \
+							node not in child_nodes:
+							edge = (current_node, node)
+							if edge in shown_edges:
+								continue
+							selected_parent = node
+							break
+					if not selected_parent:
+						# A direct cycle is unavoidable.
+						for node in parent_nodes:
+							if node not in traversed_nodes:
+								edge = (current_node, node)
+								if edge in shown_edges:
+									continue
+								selected_parent = node
+								break
+					if selected_parent:
+						shown_edges.add((current_node, selected_parent))
+						traversed_nodes.add(selected_parent)
+						add_parents(selected_parent, False)
+				# All ancestors (if any) have been emitted above; now emit
+				# this node itself at the current tree depth.
+				display_list.append((list(current_node),
+					len(tree_nodes), ordered))
+				tree_nodes.append(current_node)
+ tree_nodes = []
+ add_parents(graph_key, True)
+ else:
+ display_list.append((x, depth, True))
+ mylist = display_list
last_merge_depth = 0
for i in xrange(len(mylist)-1,-1,-1):
- if "blocks" == mylist[i][0]:
+ graph_key, depth, ordered = mylist[i]
+ if not ordered and depth == 0 and i > 0 \
+ and graph_key == mylist[i-1][0] and \
+ mylist[i-1][1] == 0:
+ # An ordered node got a consecutive duplicate when the tree was
+ # being filled in.
+ del mylist[i]
+ continue
+ if "blocks" == graph_key[0]:
continue
- graph_key = tuple(mylist[i])
- if mylist[i][-1] != "nomerge":
- last_merge_depth = node_depth[graph_key]
+ if ordered and graph_key[-1] != "nomerge":
+ last_merge_depth = depth
continue
- if node_depth[graph_key] >= last_merge_depth or \
+ if depth >= last_merge_depth or \
i < len(mylist) - 1 and \
- node_depth[graph_key] >= node_depth[tuple(mylist[i+1])]:
+ depth >= mylist[i+1][1]:
del mylist[i]
- del node_depth[graph_key]
- del tree_nodes
+ from portage import flatten
+ from portage.dep import use_reduce, paren_reduce
display_overlays=False
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
myfetchlist=[]
- for x in mylist:
+ worldlist = set(getlist(self.settings, "world"))
+
+ for mylist_index in xrange(len(mylist)):
+ x, depth, ordered = mylist[mylist_index]
pkg_type = x[0]
myroot = x[1]
pkg_key = x[2]
if x[0]=="blocks":
addl=""+red("B")+" "+fetch+" "
- counters.blocks += 1
+ if ordered:
+ counters.blocks += 1
resolved = portage.key_expand(
pkg_key, mydb=vardb, settings=pkgsettings)
if "--columns" in self.myopts and "--quiet" in self.myopts:
- print addl,red(resolved),
+ addl = addl + " " + red(resolved)
else:
- print "["+x[0]+" "+addl+"]",red(resolved),
+ addl = "[blocks " + addl + "] " + red(resolved)
block_parents = self.blocker_parents[tuple(x)]
block_parents = set([pnode[2] for pnode in block_parents])
block_parents = ", ".join(block_parents)
if resolved!=x[2]:
- print bad("(\"%s\" is blocking %s)") % \
+ addl += bad(" (\"%s\" is blocking %s)") % \
(pkg_key, block_parents)
else:
- print bad("(is blocking %s)") % block_parents
+ addl += bad(" (is blocking %s)") % block_parents
+ blockers.append(addl)
else:
mydbapi = self.trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
+ pkg_status = x[3]
+ pkg_merge = ordered and pkg_status != "nomerge"
binary_package = True
if "ebuild" == pkg_type:
if "merge" == x[3] or \
self.useFlags[myroot][pkg_key] = mydbapi.aux_get(
pkg_key, ["USE"])[0].split()
+ try:
+ restrict = flatten(use_reduce(paren_reduce(
+ mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]),
+ uselist=self.useFlags[myroot][pkg_key]))
+ except portage.exception.InvalidDependString, e:
+ if pkg_status != "nomerge":
+ restrict = mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]
+ show_invalid_depstring_notice(x, restrict, str(e))
+ del e
+ sys.exit(1)
+ restrict = []
if "ebuild" == pkg_type and x[3] != "nomerge" and \
- "fetch" in portdb.aux_get(
- x[2], ["RESTRICT"])[0].split():
+ "fetch" in restrict:
fetch = red("F")
- counters.restrict_fetch += 1
+ if ordered:
+ counters.restrict_fetch += 1
if portdb.fetch_check(
pkg_key, self.useFlags[myroot][pkg_key]):
fetch = green("f")
- counters.restrict_fetch_satisfied += 1
+ if ordered:
+ counters.restrict_fetch_satisfied += 1
#we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
#param is used for -u, where you still *do* want to see when something is being upgraded.
if vardb.cpv_exists(pkg_key):
addl=" "+yellow("R")+fetch+" "
if x[3] != "nomerge":
- counters.reinst += 1
+ if ordered:
+ counters.reinst += 1
elif vardb.match(portage.dep_getkey(pkg_key)):
mynewslot = mydbapi.aux_get(pkg_key, ["SLOT"])[0]
myoldlist = self.trees[x[1]]["vartree"].dbapi.match(
if portage.pkgcmp(portage.pkgsplit(x[2]), portage.pkgsplit(myoldbest)) < 0:
# Downgrade in slot
addl+=turquoise("U")+blue("D")
- counters.downgrades += 1
+ if ordered:
+ counters.downgrades += 1
else:
# Update in slot
addl+=turquoise("U")+" "
- counters.upgrades += 1
+ if ordered:
+ counters.upgrades += 1
else:
# New slot, mark it new.
addl=" "+green("NS")+fetch+" "
- counters.newslot += 1
+ if ordered:
+ counters.newslot += 1
if "--changelog" in self.myopts:
slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
inst_matches[0], pkg_key))
else:
addl=" "+green("N")+" "+fetch+" "
- counters.new += 1
+ if ordered:
+ counters.new += 1
verboseadd=""
if verbosity == 3:
# size verbose
mysize=0
- if x[0] == "ebuild" and x[-1]!="nomerge":
+ if pkg_type == "ebuild" and pkg_merge:
try:
myfilesdict = portdb.getfetchsizes(pkg_key,
useflags=self.useFlags[myroot][pkg_key],
if myfilesdict is None:
myfilesdict="[empty/missing/bad digest]"
else:
- for myfetchfile in myfilesdict.keys():
+ for myfetchfile in myfilesdict:
if myfetchfile not in myfetchlist:
mysize+=myfilesdict[myfetchfile]
myfetchlist.append(myfetchfile)
else:
verboseadd += "[No ebuild?]"
- xs=portage.pkgsplit(x[2])
+ xs = list(portage.pkgsplit(x[2]))
if xs[2]=="r0":
xs[2]=""
else:
oldlp=mywidth-30
newlp=oldlp-30
- indent = " " * node_depth[tuple(x)]
+ indent = " " * depth
if myoldbest:
myoldbest=portage.pkgsplit(myoldbest)[1]+"-"+portage.pkgsplit(myoldbest)[2]
myoldbest=myoldbest[:-3]
myoldbest=blue("["+myoldbest+"]")
+ pkg_cp = xs[0]
+ pkg_world = pkg_cp in worldlist
+
+			def pkgprint(pkg):
+				# Colorize a package string according to whether it will be
+				# merged (pkg_merge) and whether its cat/pkg is listed in the
+				# world file (pkg_world); color classes come from colorize().
+				if pkg_merge:
+					if pkg_world:
+						return colorize("PKG_MERGE_WORLD", pkg)
+					else:
+						return colorize("PKG_MERGE", pkg)
+				else:
+					if pkg_world:
+						return colorize("PKG_NOMERGE_WORLD", pkg)
+					else:
+						return colorize("PKG_NOMERGE", pkg)
+
if x[1]!="/":
if myoldbest:
myoldbest +=" "
if "--columns" in self.myopts:
if "--quiet" in self.myopts:
- myprint=addl+" "+indent+darkgreen(xs[0])
+ myprint=addl+" "+indent+pkgprint(pkg_cp)
myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
myprint=myprint+myoldbest
myprint=myprint+darkgreen("to "+x[1])
else:
- myprint="["+x[0]+" "+addl+"] "+indent+darkgreen(xs[0])
+ myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
myprint=myprint+myoldbest
myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
else:
- if x[3] == "nomerge":
- myprint = darkblue("[nomerge ] ")
+ if not pkg_merge:
+ myprint = "[%s ] " % pkgprint("nomerge")
else:
myprint = "[" + pkg_type + " " + addl + "] "
- myprint += indent + darkgreen(pkg_key) + " " + \
+ myprint += indent + pkgprint(pkg_key) + " " + \
myoldbest + darkgreen("to " + myroot) + " " + \
verboseadd
else:
if "--columns" in self.myopts:
if "--quiet" in self.myopts:
- myprint=addl+" "+indent+darkgreen(xs[0])
+ myprint=addl+" "+indent+pkgprint(pkg_cp)
myprint=myprint+" "+green(xs[1]+xs[2])+" "
myprint=myprint+myoldbest
else:
- myprint="["+x[0]+" "+addl+"] "+indent+darkgreen(xs[0])
+ myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
myprint=myprint+(" "*(oldlp-nc_len(myprint)))
myprint=myprint+myoldbest+" "+verboseadd
else:
- if x[3]=="nomerge":
- myprint=darkblue("[nomerge ] "+indent+x[2]+" "+myoldbest+" ")+verboseadd
+ if not pkg_merge:
+ myprint="["+pkgprint("nomerge")+" ] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
else:
- myprint="["+x[0]+" "+addl+"] "+indent+darkgreen(x[2])+" "+myoldbest+" "+verboseadd
+ myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
p.append(myprint)
mysplit = portage.pkgsplit(x[2])
myversion = "%s-%s" % (mysplit[1], mysplit[2])
if myversion != portage.VERSION and "--quiet" not in self.myopts:
- if mylist.index(x) < len(mylist) - 1 and \
+ if mylist_index < len(mylist) - 1 and \
"livecvsportage" not in self.settings.features:
p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
p.append(colorize("WARN", " then resume the merge."))
for x in p:
print x
+ for x in blockers:
+ print x
if verbosity == 3:
print
sys.stderr.write("".join(msg))
def calc_changelog(self,ebuildpath,current,next):
+ if ebuildpath == None or not os.path.exists(ebuildpath):
+ return []
current = '-'.join(portage.catpkgsplit(current)[1:])
- if current.endswith('-r0'): current = current[:-3]
+ if current.endswith('-r0'):
+ current = current[:-3]
next = '-'.join(portage.catpkgsplit(next)[1:])
- if next.endswith('-r0'): next = next[:-3]
+ if next.endswith('-r0'):
+ next = next[:-3]
changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
try:
changelog = open(changelogpath).read()
portage.config(clone=trees["/"]["vartree"].settings)
def merge(self, mylist, favorites, mtimedb):
+ from portage.elog import elog_process
failed_fetches = []
+ fetchonly = "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts
mymergelist=[]
ldpath_mtimes = mtimedb["ldpath"]
xterm_titles = "notitles" not in self.settings.features
#check for blocking dependencies
if "--fetchonly" not in self.myopts and \
+ "--fetch-all-uri" not in self.myopts and \
"--buildpkgonly" not in self.myopts:
for x in mylist:
if x[0]=="blocks":
print "!!! Please use 'emerge --pretend' to determine blockers."
if "--quiet" not in self.myopts:
show_blocker_docs_link()
- if "--pretend" not in self.myopts:
- try:
- del mtimedb["resume"]
- except KeyError:
- pass
- sys.exit(1)
+ return 1
- #buildsyspkg: I need mysysdict also on resume (moved from the else block)
- mysysdict = genericdict(getlist(self.settings, "system"))
if "--resume" in self.myopts:
# We're resuming.
print colorize("GOOD", "*** Resuming merge...")
emergelog(xterm_titles, " *** Resuming merge...")
- mymergelist=mtimedb["resume"]["mergelist"][:]
- if "--skipfirst" in self.myopts and mymergelist:
+ mylist = mtimedb["resume"]["mergelist"][:]
+ if "--skipfirst" in self.myopts and mylist:
del mtimedb["resume"]["mergelist"][0]
- del mymergelist[0]
+ del mylist[0]
mtimedb.commit()
- validate_merge_list(self.trees, mymergelist)
- else:
+ validate_merge_list(self.trees, mylist)
+ mymergelist = mylist
+
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "--fetchonly" not in self.myopts and \
+ "--fetch-all-uri" not in self.myopts and \
+ "strict" in self.settings.features:
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.iteritems():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+ for x in mylist:
+ if x[0] != "ebuild" or x[-1] == "nomerge":
+ continue
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ print ">>> Verifying ebuild Manifests..."
+ mytype, myroot, mycpv, mystatus = x
+ portdb = self.trees[myroot]["porttree"].dbapi
+ quiet_config = quiet_settings[myroot]
+ quiet_config["O"] = os.path.dirname(portdb.findname(mycpv))
+ if not portage.digestcheck([], quiet_config, strict=True):
+ return 1
+ del x, mytype, myroot, mycpv, mystatus, quiet_config
+ del shown_verifying_msg, quiet_settings
+
+ #buildsyspkg: I need mysysdict also on resume (moved from the else block)
+ mysysdict = genericdict(getlist(self.settings, "system"))
+ if "--resume" not in self.myopts:
myfavs = portage.grabfile(
os.path.join(self.target_root, portage.WORLD_FILE))
myfavdict=genericdict(myfavs)
"--pretend" in self.myopts):
portage.write_atomic(
os.path.join(self.target_root, portage.WORLD_FILE),
- "\n".join(myfavdict.values()))
+ "\n".join(sorted(myfavdict.values())) + "\n")
mtimedb["resume"]["mergelist"]=mymergelist[:]
mtimedb.commit()
fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
fetch_env["PORTAGE_NICENESS"] = "0"
fetch_args = [sys.argv[0], "--resume", "--fetchonly"]
- for myopt, myarg in self.myopts.iteritems():
+ resume_opts = self.myopts.copy()
+ # For automatic resume, we need to prevent
+ # any of bad_resume_opts from leaking in
+ # via EMERGE_DEFAULT_OPTS.
+ resume_opts["--ignore-default-opts"] = True
+ for myopt, myarg in resume_opts.iteritems():
if myopt not in bad_resume_opts:
if myarg is True:
fetch_args.append(myopt)
portage.process.spawn(fetch_args, env=fetch_env,
fd_pipes=fd_pipes, returnpid=True)
logfile.close() # belongs to the spawned process
+ del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \
+ resume_opts
mergecount=0
for x in mymergelist:
(mergecount, len(mymergelist), pkg_key)
emergelog(xterm_titles, msg, short_msg=short_msg)
self.trees[myroot]["bintree"].prevent_collision(pkg_key)
+ binpkg_tmpfile = os.path.join(pkgsettings["PKGDIR"],
+ pkg_key + ".tbz2." + str(os.getpid()))
+ pkgsettings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
+ pkgsettings.backup_changes("PORTAGE_BINPKG_TMPFILE")
retval = portage.doebuild(y, "package", myroot,
pkgsettings, self.edebug, mydbapi=portdb,
tree="porttree")
+ del pkgsettings["PORTAGE_BINPKG_TMPFILE"]
+ if "--buildpkgonly" in self.myopts:
+ elog_process(pkg_key, pkgsettings)
if retval != os.EX_OK:
return retval
+ bintree = self.trees[myroot]["bintree"]
+ bintree.inject(pkg_key, filename=binpkg_tmpfile)
if "--buildpkgonly" not in self.myopts:
- bintree = self.trees[myroot]["bintree"]
- bintree.inject(pkg_key)
- mytbz2 = bintree.getname(pkg_key)
msg = " === (%s of %s) Merging (%s::%s)" % \
(mergecount, len(mymergelist), pkg_key, y)
short_msg = "emerge: (%s of %s) %s Merge" % \
short_msg = "emerge: (%s of %s) %s Fetch" % \
(mergecount, len(mymergelist), pkg_key)
emergelog(xterm_titles, msg, short_msg=short_msg)
- if not self.trees[myroot]["bintree"].gettbz2(
- pkg_key):
- return 1
+ try:
+ self.trees[myroot]["bintree"].gettbz2(pkg_key)
+ except portage.exception.FileNotFound:
+ writemsg("!!! Fetching Binary failed " + \
+ "for '%s'\n" % pkg_key, noiselevel=-1)
+ if not fetchonly:
+ return 1
+ failed_fetches.append(pkg_key)
+ except portage.exception.DigestException, e:
+ writemsg("\n!!! Digest verification failed:\n",
+ noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0],
+ noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % e.value[1],
+ noiselevel=-1)
+ writemsg("!!! Got: %s\n" % e.value[2],
+ noiselevel=-1)
+ writemsg("!!! Expected: %s\n" % e.value[3],
+ noiselevel=-1)
+ os.unlink(mytbz2)
+ if not fetchonly:
+ return 1
+ failed_fetches.append(pkg_key)
finally:
if tbz2_lock:
portage.locks.unlockfile(tbz2_lock)
") Updating world file ("+x[pkgindex]+")")
portage.write_atomic(
os.path.join(myroot, portage.WORLD_FILE),
- "\n".join(myfavdict.values()))
+ "\n".join(sorted(myfavdict.values()))+"\n")
if "--pretend" not in self.myopts and \
"--fetchonly" not in self.myopts and \
mtimedb.commit()
portage.run_exitfuncs()
mynewargv=[sys.argv[0],"--resume"]
- for myopt, myarg in self.myopts.iteritems():
+ resume_opts = self.myopts.copy()
+ # For automatic resume, we need to prevent
+ # any of bad_resume_opts from leaking in
+ # via EMERGE_DEFAULT_OPTS.
+ resume_opts["--ignore-default-opts"] = True
+ for myopt, myarg in resume_opts.iteritems():
if myopt not in bad_resume_opts:
if myarg is True:
mynewargv.append(myopt)
if not slotmap.has_key(myslot):
slotmap[myslot]={}
slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)]=mypkg
- for myslot in slotmap.keys():
+ for myslot in slotmap:
counterkeys=slotmap[myslot].keys()
counterkeys.sort()
if not counterkeys:
finally:
if vdb_lock:
portage.locks.unlockdir(vdb_lock)
- for x in pkgmap.keys():
+ for x in pkgmap:
for y in localtree.dep_match(x):
if y not in pkgmap[x]["omitted"] and \
y not in pkgmap[x]["selected"] and \
if not autoclean:
countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
- for x in pkgmap.keys():
+ for x in pkgmap:
for y in pkgmap[x]["selected"]:
print ">>> Unmerging "+y+"..."
emergelog(xterm_titles, "=== Unmerging... ("+y+")")
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
- print
if os.path.exists("/usr/bin/install-info"):
regen_infodirs=[]
for z in infodirs:
print " "+green("*")+" Processed",icount,"info files."
-def post_emerge(settings, mtimedb, retval):
+def display_news_notification(trees):
+	"""Print a notice for any unread news items found in each repository
+	known to portdb, telling the user to read them with 'eselect news'."""
+	# NOTE(review): with multiple ROOTs this loop leaves target_root at the
+	# first non-"/" entry encountered — confirm that is the intended ROOT.
+	for target_root in trees:
+		if len(trees) > 1 and target_root != "/":
+			break
+	settings = trees[target_root]["vartree"].settings
+	portdb = trees[target_root]["porttree"].dbapi
+	vardb = trees[target_root]["vartree"].dbapi
+	NEWS_PATH = os.path.join("metadata", "news")
+	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
+	newsReaderDisplay = False
+
+	for repo in portdb.getRepositories():
+		unreadItems = checkUpdatedNewsItems(
+			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo)
+		if unreadItems:
+			if not newsReaderDisplay:
+				newsReaderDisplay = True
+				print
+			print colorize("WARN", " * IMPORTANT:"),
+			print "%s news items need reading for repository '%s'." % (unreadItems, repo)
+
+
+	if newsReaderDisplay:
+		print colorize("WARN", " *"),
+		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
+		print
+
+def post_emerge(trees, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.
	Update Config Files
	Update News Items
	Commit mtimeDB
+	Display preserved libs warnings
	Exit Emerge
-
-	@param settings: Configuration settings (typically portage.settings)
-	@type settings: portage.config()
+
	@param trees: A dictionary mapping each ROOT to its package databases
	@type trees: dict
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@returns:
	1. Calls sys.exit(retval)
	"""
-	target_root = settings["ROOT"]
+	# NOTE(review): with multiple ROOTs this loop leaves target_root at the
+	# first non-"/" entry encountered — confirm that is the intended ROOT.
+	for target_root in trees:
+		if len(trees) > 1 and target_root != "/":
+			break
+	vardbapi = trees[target_root]["vartree"].dbapi
+	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]
	# Load the most current variables from ${ROOT}/etc/profile.env
	emergelog("notitles" not in settings.features,
		" *** exiting successfully.")
+	# Dump the mod_echo output now so that our other notifications are shown
+	# last.
+	from portage.elog import mod_echo
+	mod_echo.finalize()
+
	if "noinfo" not in settings.features:
		chk_updated_info_files(target_root, infodirs, info_mtimes, retval)
	chk_updated_cfg_files(target_root, config_protect)
-	NEWS_PATH = os.path.join( "metadata", "news" )
-	UNREAD_PATH = os.path.join( target_root, NEWS_LIB_PATH, 'news')
-	porttree = portdbapi( porttree_root = settings["PORTDIR"], mysettings = settings )
-	newsReaderDisplay = False
-
-	for repo in porttree.getRepositories():
-		unreadItems = checkUpdatedNewsItems(target_root, NEWS_PATH, UNREAD_PATH, repo)
-		if unreadItems:
-			print colorize("WARN", " * IMPORTANT:"),
-			print "%s news items need reading for repository '%s'." % (unreadItems, repo)
-			newsReaderDisplay = True
+	display_news_notification(trees)
-	if newsReaderDisplay:
-		print colorize("WARN", " *"),
-		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
+	# Warn about preserved libraries left behind by removed packages so the
+	# user knows to run revdep-rebuild and remerge the affected packages.
+	if vardbapi.plib_registry.hasEntries():
+		print colorize("WARN", "!!!") + " existing preserved libs:"
+		plibdata = vardbapi.plib_registry.getPreservedLibs()
+		for cpv in plibdata:
+			print colorize("WARN", ">>>") + " package: %s" % cpv
+			for f in plibdata[cpv]:
+				print colorize("WARN", " * ") + " - %s" % f
+		print "Use " + colorize("GOOD", "revdep-rebuild") + " to rebuild packages using these libraries"
+		print "and then remerge the packages listed above."
	mtimedb.commit()
	sys.exit(retval)
#print " "+yellow("*")+" Type "+green("emerge --help config")+" to learn how to update config files."
print " "+yellow("*")+" Type "+green("emerge --help config")+" to learn how to update config files."
-def checkUpdatedNewsItems( root, NEWS_PATH, UNREAD_PATH, repo_id ):
+def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id):
"""
Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
Returns the number of unread (yet relevent) items.
- @param root:
- @type root:
+ @param portdb: a portage tree database
+	@type portdb: portdbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
@param NEWS_PATH:
@type NEWS_PATH:
@param UNREAD_PATH:
"""
from portage.news import NewsManager
- manager = NewsManager( root, NEWS_PATH, UNREAD_PATH )
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
return manager.getUnreadItems( repo_id, update=True )
def is_valid_package_atom(x):
mytimeout=180
rsync_opts = []
-
+ import shlex, StringIO
if settings["PORTAGE_RSYNC_OPTS"] == "":
portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
rsync_opts.extend([
"--exclude=/local", # Exclude local from consideration
"--exclude=/packages", # Exclude packages from consideration
"--filter=H_**/files/digest-*", # Exclude manifest1 digests and delete on the receiving side
- "--prune-empty-dirs" # Prune empty ${FILESDIR} when manifest1 digests are excluded
])
else:
# defaults.
portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
- rsync_opts.extend(settings["PORTAGE_RSYNC_OPTS"].split())
+ lexer = shlex.shlex(StringIO.StringIO(
+ settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
+ lexer.whitespace_split = True
+ rsync_opts.extend(lexer)
+ del lexer
for opt in ("--recursive", "--times"):
if opt not in rsync_opts:
pass
del content
+ try:
+ rsync_initial_timeout = \
+ int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+ except ValueError:
+ rsync_initial_timeout = 15
+
try:
if settings.has_key("RSYNC_RETRIES"):
print yellow("WARNING:")+" usage of RSYNC_RETRIES is deprecated, use PORTAGE_RSYNC_RETRIES instead"
user_name=""
updatecache_flg=True
all_rsync_opts = set(rsync_opts)
- all_rsync_opts.update(
- settings.get("PORTAGE_RSYNC_EXTRA_OPTS","").split())
- family = socket.AF_UNSPEC
+ lexer = shlex.shlex(StringIO.StringIO(
+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
+ lexer.whitespace_split = True
+ extra_rsync_opts = list(lexer)
+ del lexer
+ all_rsync_opts.update(extra_rsync_opts)
+ family = socket.AF_INET
if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
family = socket.AF_INET
elif socket.has_ipv6 and \
if mytimestamp != 0 and "--quiet" not in myopts:
print ">>> Checking server timestamp ..."
- rsynccommand = " ".join(["/usr/bin/rsync", " ".join(rsync_opts),
- settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")])
+ rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
if "--debug" in myopts:
print rsynccommand
exitcode = os.EX_OK
servertimestamp = 0
- if mytimestamp != 0:
- mycommand = rsynccommand.split()
+ # Even if there's no timestamp available locally, fetch the
+ # timestamp anyway as an initial probe to verify that the server is
+ # responsive. This protects us from hanging indefinitely on a
+ # connection attempt to an unresponsive server which rsync's
+ # --timeout option does not prevent.
+ if True:
+ mycommand = rsynccommand[:]
mycommand.append(dosyncuri.rstrip("/") + \
"/metadata/timestamp.chk")
mycommand.append(tmpservertimestampfile)
# Timeout here in case the server is unresponsive. The
# --timeout rsync option doesn't apply to the initial
# connection attempt.
- signal.alarm(15)
+ if rsync_initial_timeout:
+ signal.alarm(rsync_initial_timeout)
try:
mypids.extend(portage.process.spawn(
mycommand, env=settings.environ(), returnpid=True))
exitcode = os.waitpid(mypids[0], 0)[1]
content = portage.grabfile(tmpservertimestampfile)
finally:
- signal.alarm(0)
+ if rsync_initial_timeout:
+ signal.alarm(0)
try:
os.unlink(tmpservertimestampfile)
except OSError:
print
elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# actual sync
- mycommand=rsynccommand+" "+dosyncuri+"/ "+myportdir
- mycommand = mycommand.split()
+ mycommand = rsynccommand + [dosyncuri+"/", myportdir]
exitcode = portage.process.spawn(mycommand,
env=settings.environ())
if exitcode in [0,1,3,4,11,14,20,21]:
# Code 2 indicates protocol incompatibility, which is expected
# for servers with protocol < 29 that don't support
# --prune-empty-directories. Retry for a server that supports
- # at least rsync protocol version 29 (>=rsync-2.6.7).
+ # at least rsync protocol version 29 (>=rsync-2.6.4).
pass
retries=retries+1
if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
action_metadata(settings, portdb, myopts)
- if portage.global_updates(settings, trees, mtimedb["updates"]):
+ if portage._global_updates(trees, mtimedb["updates"]):
mtimedb.commit()
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
print red(" * ")+"configuration files."
print red(" * ")+"To update portage, run 'emerge portage'."
print
+
+ display_news_notification(trees)
def action_metadata(settings, portdb, myopts):
portage.writemsg_stdout("\n>>> Updating Portage cache: ")
dead_nodes = None
break
for x in mynodes:
- mymatches = portdb.xmatch("match-all",x)
+ mymatches = portdb.cp_list(x)
portage.writemsg_stdout("processing %s\n" % x)
for y in mymatches:
try:
for x in myvars:
if portage.isvalidatom(x):
pkg_matches = trees["/"]["vartree"].dbapi.match(x)
+ pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
+ pkg_matches.sort(portage.pkgcmp)
pkgs = []
- for y in pkg_matches:
- mycpv = portage.catpkgsplit(y)
- if(mycpv[3] != "r0"):
- pkgs += [mycpv[2] + "-" + mycpv[3]]
+ for pn, ver, rev in pkg_matches:
+ if rev != "r0":
+ pkgs.append(ver + "-" + rev)
else:
- pkgs += [mycpv[2]]
+ pkgs.append(ver)
if pkgs:
- pkgs = ", ".join(sorted_versions(pkgs))
+ pkgs = ", ".join(pkgs)
print "%-20s %s" % (x+":", pkgs)
else:
print "%-20s %s" % (x+":", "[NOT VALID]")
dep_check_trees[myroot]["porttree"] = dep_check_trees[myroot]["vartree"]
syslist = getlist(settings, "system")
worldlist = getlist(settings, "world")
+ system_world_dict = genericdict(worldlist)
+ system_world_dict.update(genericdict(syslist))
fakedb = portage.fakedbapi(settings=settings)
myvarlist = vardb.cpv_all()
if not atom.startswith("!") and priority == hard:
unresolveable.setdefault(atom, []).append(parent)
continue
- # Could put slot checking here to ensure that there aren't two
- # packages with the same slot...
+ if portage.dep_getkey(atom) not in system_world_dict:
+ # Prune all but the best matching slot, since that's all that a
+ # deep world update would pull in. Don't prune if the cpv is in
+ # system or world though, since those sets trigger greedy update
+ # of all slots.
+ pkgs = [portage.best(pkgs)]
for pkg in pkgs:
if fakedb.cpv_exists(pkg):
continue
print atom, "required by", " ".join(unresolveable[atom])
print
print "Have you forgotten to run " + good("`emerge --update --newuse --deep world`") + " prior to"
- print "depclean? It may also be necessary to manually uninstalled packages that no"
- print "longer exist in the portage tree since it may not be possible to satisfy their"
- print "dependencies."
+ print "depclean? It may be necessary to manually uninstall packages that no longer"
+ print "exist in the portage tree since it may not be possible to satisfy their"
+ print "dependencies. Also, be aware of the --with-bdeps option that is documented"
+ print "in " + good("`man emerge`") + "."
print
return
if myopt not in myresumeopts:
myresumeopts[myopt] = myarg
myopts=myresumeopts
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
myparams = create_depgraph_params(myopts, myaction)
if "--quiet" not in myopts and "--nodeps" not in myopts:
print "Calculating dependencies ",
else:
mydepgraph.display(
mydepgraph.altlist(reversed=("--tree" in myopts)))
+ if "--buildpkgonly" in myopts and \
+ not mydepgraph.digraph.hasallzeros(ignore_priority=DepPriority.MEDIUM):
+ print "\n!!! --buildpkgonly requires all dependencies to be merged."
+ print "!!! You have to merge the dependencies before you can build this package.\n"
+ sys.exit(1)
else:
if ("--buildpkgonly" in myopts):
if not mydepgraph.digraph.hasallzeros(ignore_priority=DepPriority.MEDIUM):
def validate_ebuild_environment(trees):
for myroot in trees:
mysettings = trees[myroot]["vartree"].settings
- if not mysettings.get("ARCH", None):
- print >> sys.stderr, bad("\a!!! ARCH is not set... " + \
+ for var in "ARCH", "USERLAND":
+ if mysettings.get(var):
+ continue
+ print >> sys.stderr, bad(("\a!!! %s is not set... " % var) + \
"Are you missing the '%setc/make.profile' symlink?" % \
mysettings["PORTAGE_CONFIGROOT"])
print >> sys.stderr, bad("\a!!! Is the symlink correct? " + \
portage.writemsg("!!! %s\n" % str(e))
del e
- if portage.global_updates(settings, trees, mtimedb["updates"]):
+ if portage._global_updates(trees, mtimedb["updates"]):
mtimedb.commit()
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
xterm_titles = "notitles" not in settings.features
tmpcmdline = []
- if "--ignore-default-opts" not in sys.argv:
+ if "--ignore-default-opts" not in myopts:
tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
tmpcmdline.extend(sys.argv[1:])
myaction, myopts, myfiles = parse_opts(tmpcmdline)
if "candy" in settings.features:
spinner.update = spinner.update_scroll
- portage.deprecated_profile_check()
+ if "--quiet" not in myopts:
+ portage.deprecated_profile_check()
#Freeze the portdbapi for enhanced performance:
for myroot in trees:
# check if root user is the current user for the actions where emerge needs this
if portage.secpass < 2:
# We've already allowed "--version" and "--help" above.
- if "--pretend" not in myopts and \
- myaction not in ("search","info"):
+ if "--pretend" not in myopts and myaction not in ("search","info"):
need_superuser = not \
("--fetchonly" in myopts or \
"--fetch-all-uri" in myopts or \
- myaction in ("metadata", "regen"))
+ myaction in ("metadata", "regen") or \
+ (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
if portage.secpass < 1 or \
need_superuser:
if need_superuser:
if "--ask" in myopts:
myopts["--pretend"] = True
del myopts["--ask"]
- print ("%s access would be required... " + \
+ print ("%s access is required... " + \
"adding --pretend to options.\n") % access_desc
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
else:
- sys.stderr.write(("emerge: %s access would be " + \
+ sys.stderr.write(("emerge: %s access is " + \
"required.\n\n") % access_desc)
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
if 1 == unmerge(settings, myopts, vartree, myaction, myfiles,
mtimedb["ldpath"]):
if "--pretend" not in myopts:
- post_emerge(settings, mtimedb, 0)
+ post_emerge(trees, mtimedb, os.EX_OK)
elif "depclean"==myaction:
validate_ebuild_environment(trees)
action_depclean(settings, trees, mtimedb["ldpath"],
myopts, spinner)
if "--pretend" not in myopts:
- post_emerge(settings, mtimedb, 0)
+ post_emerge(trees, mtimedb, os.EX_OK)
# "update", "system", or just process files:
else:
validate_ebuild_environment(trees)
+ if "--pretend" not in myopts:
+ display_news_notification(trees)
action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner)
if "--pretend" not in myopts:
- post_emerge(settings, mtimedb, 0)
+ post_emerge(trees, mtimedb, os.EX_OK)
+ else:
+ display_news_notification(trees)
if __name__ == "__main__":
retval = emerge_main()