20597c0536c907dfa63ebff80452e28598518c0f
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
5
6 import sys
7 # This block ensures that ^C interrupts are handled quietly.
8 try:
9         import signal
10
11         def exithandler(signum,frame):
12                 signal.signal(signal.SIGINT, signal.SIG_IGN)
13                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14                 sys.exit(1)
15         
16         signal.signal(signal.SIGINT, exithandler)
17         signal.signal(signal.SIGTERM, exithandler)
18         signal.signal(signal.SIGPIPE, signal.SIG_DFL)
19
20 except KeyboardInterrupt:
21         sys.exit(1)
22
23 import array
24 from collections import deque
25 import fcntl
26 import formatter
27 import logging
28 import select
29 import shlex
30 import shutil
31 import textwrap
32 import urlparse
33 import weakref
34 import gc
35 import os, stat
36 import platform
37
38 try:
39         import portage
40 except ImportError:
41         from os import path as osp
42         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
43         import portage
44
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
47
48 import _emerge.help
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51         nc_len, red, teal, turquoise, xtermTitle, \
52         xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
58
59 import portage.elog
60 import portage.dep
61 portage.dep._dep_check_strict = True
62 import portage.util
63 import portage.locks
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage.sets import load_default_config, SETPREFIX
70 from portage.sets.base import InternalPackageSet
71
72 from itertools import chain, izip
73 from UserDict import DictMixin
74
75 try:
76         import cPickle as pickle
77 except ImportError:
78         import pickle
79
80 try:
81         import cStringIO as StringIO
82 except ImportError:
83         import StringIO
84
85 class stdout_spinner(object):
86         scroll_msgs = [
87                 "Gentoo Rocks ("+platform.system()+")",
88                 "Thank you for using Gentoo. :)",
89                 "Are you actually trying to read this?",
90                 "How many times have you stared at this?",
91                 "We are generating the cache right now",
92                 "You are paying too much attention.",
93                 "A theory is better than its explanation.",
94                 "Phasers locked on target, Captain.",
95                 "Thrashing is just virtual crashing.",
96                 "To be is to program.",
97                 "Real Users hate Real Programmers.",
98                 "When all else fails, read the instructions.",
99                 "Functionality breeds Contempt.",
100                 "The future lies ahead.",
101                 "3.1415926535897932384626433832795028841971694",
102                 "Sometimes insanity is the only alternative.",
103                 "Inaccuracy saves a world of explanation.",
104         ]
105
106         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
107
108         def __init__(self):
109                 self.spinpos = 0
110                 self.update = self.update_twirl
111                 self.scroll_sequence = self.scroll_msgs[
112                         int(time.time() * 100) % len(self.scroll_msgs)]
113                 self.last_update = 0
114                 self.min_display_latency = 0.05
115
116         def _return_early(self):
117                 """
118                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
119                 each update* method should return without doing any output when this
120                 method returns True.
121                 """
122                 cur_time = time.time()
123                 if cur_time - self.last_update < self.min_display_latency:
124                         return True
125                 self.last_update = cur_time
126                 return False
127
128         def update_basic(self):
129                 self.spinpos = (self.spinpos + 1) % 500
130                 if self._return_early():
131                         return
132                 if (self.spinpos % 100) == 0:
133                         if self.spinpos == 0:
134                                 sys.stdout.write(". ")
135                         else:
136                                 sys.stdout.write(".")
137                 sys.stdout.flush()
138
139         def update_scroll(self):
140                 if self._return_early():
141                         return
142                 if(self.spinpos >= len(self.scroll_sequence)):
143                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
145                 else:
146                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
147                 sys.stdout.flush()
148                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
149
150         def update_twirl(self):
151                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152                 if self._return_early():
153                         return
154                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
155                 sys.stdout.flush()
156
157         def update_quiet(self):
158                 return
159
160 def userquery(prompt, responses=None, colours=None):
161         """Displays a prompt and a set of responses, then waits for a response
162         which is checked against the responses and the first to match is
163         returned.  An empty response will match the first value in responses.  The
164         input buffer is *not* cleared prior to the prompt!
165
166         prompt: a String.
167         responses: a List of Strings.
168         colours: a List of Functions taking and returning a String, used to
169         process the responses for display. Typically these will be functions
170         like red() but could be e.g. lambda x: "DisplayString".
171         If responses is omitted, defaults to ["Yes", "No"], [green, red].
172         If only colours is omitted, defaults to [bold, ...].
173
174         Returns a member of the List responses. (If called without optional
175         arguments, returns "Yes" or "No".)
176         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
177         printed."""
178         if responses is None:
179                 responses = ["Yes", "No"]
180                 colours = [
181                         create_color_func("PROMPT_CHOICE_DEFAULT"),
182                         create_color_func("PROMPT_CHOICE_OTHER")
183                 ]
184         elif colours is None:
185                 colours=[bold]
186         colours=(colours*len(responses))[:len(responses)]
187         print bold(prompt),
188         try:
189                 while True:
190                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191                         for key in responses:
192                                 # An empty response will match the first value in responses.
193                                 if response.upper()==key[:len(response)].upper():
194                                         return key
195                         print "Sorry, response '%s' not understood." % response,
196         except (EOFError, KeyboardInterrupt):
197                 print "Interrupted."
198                 sys.exit(1)
199
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen",  "search",
204 "sync",  "unmerge",
205 ])
206 options=[
207 "--ask",          "--alphabetical",
208 "--buildpkg",     "--buildpkgonly",
209 "--changelog",    "--columns",
210 "--complete-graph",
211 "--debug",        "--deep",
212 "--digest",
213 "--emptytree",
214 "--fetchonly",    "--fetch-all-uri",
215 "--getbinpkg",    "--getbinpkgonly",
216 "--help",         "--ignore-default-opts",
217 "--keep-going",
218 "--noconfmem",
219 "--newuse",       "--nocolor",
220 "--nodeps",       "--noreplace",
221 "--nospinner",    "--oneshot",
222 "--onlydeps",     "--pretend",
223 "--quiet",        "--resume",
224 "--searchdesc",   "--selective",
225 "--skipfirst",
226 "--tree",
227 "--update",
228 "--usepkg",       "--usepkgonly",
229 "--verbose",      "--version"
230 ]
231
232 shortmapping={
233 "1":"--oneshot",
234 "a":"--ask",
235 "b":"--buildpkg",  "B":"--buildpkgonly",
236 "c":"--clean",     "C":"--unmerge",
237 "d":"--debug",     "D":"--deep",
238 "e":"--emptytree",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
241 "h":"--help",
242 "k":"--usepkg",    "K":"--usepkgonly",
243 "l":"--changelog",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps",  "O":"--nodeps",
246 "p":"--pretend",   "P":"--prune",
247 "q":"--quiet",
248 "s":"--search",    "S":"--searchdesc",
249 "t":"--tree",
250 "u":"--update",
251 "v":"--verbose",   "V":"--version"
252 }
253
254 def emergelog(xterm_titles, mystr, short_msg=None):
255         if xterm_titles and short_msg:
256                 if "HOSTNAME" in os.environ:
257                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
258                 xtermTitle(short_msg)
259         try:
260                 file_path = "/var/log/emerge.log"
261                 mylogfile = open(file_path, "a")
262                 portage.util.apply_secpass_permissions(file_path,
263                         uid=portage.portage_uid, gid=portage.portage_gid,
264                         mode=0660)
265                 mylock = None
266                 try:
267                         mylock = portage.locks.lockfile(mylogfile)
268                         # seek because we may have gotten held up by the lock.
269                         # if so, we may not be positioned at the end of the file.
270                         mylogfile.seek(0, 2)
271                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
272                         mylogfile.flush()
273                 finally:
274                         if mylock:
275                                 portage.locks.unlockfile(mylock)
276                         mylogfile.close()
277         except (IOError,OSError,portage.exception.PortageException), e:
278                 if secpass >= 1:
279                         print >> sys.stderr, "emergelog():",e
280
281 def countdown(secs=5, doing="Starting"):
282         if secs:
283                 print ">>> Waiting",secs,"seconds before starting..."
284                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
285                 ticks=range(secs)
286                 ticks.reverse()
287                 for sec in ticks:
288                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
289                         sys.stdout.flush()
290                         time.sleep(1)
291                 print
292
293 # formats a size given in bytes nicely
294 def format_size(mysize):
295         if type(mysize) not in [types.IntType,types.LongType]:
296                 return str(mysize)
297         if 0 != mysize % 1024:
298                 # Always round up to the next kB so that it doesn't show 0 kB when
299                 # some small file still needs to be fetched.
300                 mysize += 1024 - mysize % 1024
301         mystr=str(mysize/1024)
302         mycount=len(mystr)
303         while (mycount > 3):
304                 mycount-=3
305                 mystr=mystr[:mycount]+","+mystr[mycount:]
306         return mystr+" kB"
307
308
309 def getgccversion(chost):
310         """
311         rtype: C{str}
312         return:  the current in-use gcc version
313         """
314
315         gcc_ver_command = 'gcc -dumpversion'
316         gcc_ver_prefix = 'gcc-'
317
318         gcc_not_found_error = red(
319         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320         "!!! to update the environment of this terminal and possibly\n" +
321         "!!! other terminals also.\n"
322         )
323
324         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
327
328         mystatus, myoutput = commands.getstatusoutput(
329                 chost + "-" + gcc_ver_command)
330         if mystatus == os.EX_OK:
331                 return gcc_ver_prefix + myoutput
332
333         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334         if mystatus == os.EX_OK:
335                 return gcc_ver_prefix + myoutput
336
337         portage.writemsg(gcc_not_found_error, noiselevel=-1)
338         return "[unavailable]"
339
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341         profilever = "unavailable"
342         if profile:
343                 realpath = os.path.realpath(profile)
344                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
345                 if realpath.startswith(basepath):
346                         profilever = realpath[1 + len(basepath):]
347                 else:
348                         try:
349                                 profilever = "!" + os.readlink(profile)
350                         except (OSError):
351                                 pass
352                 del realpath, basepath
353
354         libcver=[]
355         libclist  = vardb.match("virtual/libc")
356         libclist += vardb.match("virtual/glibc")
357         libclist  = portage.util.unique_array(libclist)
358         for x in libclist:
359                 xs=portage.catpkgsplit(x)
360                 if libcver:
361                         libcver+=","+"-".join(xs[1:])
362                 else:
363                         libcver="-".join(xs[1:])
364         if libcver==[]:
365                 libcver="unavailable"
366
367         gccver = getgccversion(chost)
368         unameout=platform.release()+" "+platform.machine()
369
370         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
371
372 def create_depgraph_params(myopts, myaction):
373         #configure emerge engine parameters
374         #
375         # self:      include _this_ package regardless of if it is merged.
376         # selective: exclude the package if it is merged
377         # recurse:   go into the dependencies
378         # deep:      go into the dependencies of already merged packages
379         # empty:     pretend nothing is merged
380         # complete:  completely account for all known dependencies
381         # remove:    build graph for use in removing packages
382         myparams = set(["recurse"])
383
384         if myaction == "remove":
385                 myparams.add("remove")
386                 myparams.add("complete")
387                 return myparams
388
389         if "--update" in myopts or \
390                 "--newuse" in myopts or \
391                 "--reinstall" in myopts or \
392                 "--noreplace" in myopts:
393                 myparams.add("selective")
394         if "--emptytree" in myopts:
395                 myparams.add("empty")
396                 myparams.discard("selective")
397         if "--nodeps" in myopts:
398                 myparams.discard("recurse")
399         if "--deep" in myopts:
400                 myparams.add("deep")
401         if "--complete-graph" in myopts:
402                 myparams.add("complete")
403         return myparams
404
405 # search functionality
406 class search(object):
407
408         #
409         # class constants
410         #
411         VERSION_SHORT=1
412         VERSION_RELEASE=2
413
414         #
415         # public interface
416         #
417         def __init__(self, root_config, spinner, searchdesc,
418                 verbose, usepkg, usepkgonly):
419                 """Searches the available and installed packages for the supplied search key.
420                 The list of available and installed packages is created at object instantiation.
421                 This makes successive searches faster."""
422                 self.settings = root_config.settings
423                 self.vartree = root_config.trees["vartree"]
424                 self.spinner = spinner
425                 self.verbose = verbose
426                 self.searchdesc = searchdesc
427                 self.root_config = root_config
428                 self.setconfig = root_config.setconfig
429                 self.matches = {"pkg" : []}
430                 self.mlen = 0
431
432                 def fake_portdb():
433                         pass
434                 self.portdb = fake_portdb
435                 for attrib in ("aux_get", "cp_all",
436                         "xmatch", "findname", "getFetchMap"):
437                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
438
439                 self._dbs = []
440
441                 portdb = root_config.trees["porttree"].dbapi
442                 bindb = root_config.trees["bintree"].dbapi
443                 vardb = root_config.trees["vartree"].dbapi
444
445                 if not usepkgonly and portdb._have_root_eclass_dir:
446                         self._dbs.append(portdb)
447
448                 if (usepkg or usepkgonly) and bindb.cp_all():
449                         self._dbs.append(bindb)
450
451                 self._dbs.append(vardb)
452                 self._portdb = portdb
453
454         def _cp_all(self):
455                 cp_all = set()
456                 for db in self._dbs:
457                         cp_all.update(db.cp_all())
458                 return list(sorted(cp_all))
459
460         def _aux_get(self, *args, **kwargs):
461                 for db in self._dbs:
462                         try:
463                                 return db.aux_get(*args, **kwargs)
464                         except KeyError:
465                                 pass
466                 raise
467
468         def _findname(self, *args, **kwargs):
469                 for db in self._dbs:
470                         if db is not self._portdb:
471                                 # We don't want findname to return anything
472                                 # unless it's an ebuild in a portage tree.
473                                 # Otherwise, it's already built and we don't
474                                 # care about it.
475                                 continue
476                         func = getattr(db, "findname", None)
477                         if func:
478                                 value = func(*args, **kwargs)
479                                 if value:
480                                         return value
481                 return None
482
483         def _getFetchMap(self, *args, **kwargs):
484                 for db in self._dbs:
485                         func = getattr(db, "getFetchMap", None)
486                         if func:
487                                 value = func(*args, **kwargs)
488                                 if value:
489                                         return value
490                 return {}
491
492         def _visible(self, db, cpv, metadata):
493                 installed = db is self.vartree.dbapi
494                 built = installed or db is not self._portdb
495                 pkg_type = "ebuild"
496                 if installed:
497                         pkg_type = "installed"
498                 elif built:
499                         pkg_type = "binary"
500                 return visible(self.settings,
501                         Package(type_name=pkg_type, root_config=self.root_config,
502                         cpv=cpv, built=built, installed=installed, metadata=metadata))
503
504         def _xmatch(self, level, atom):
505                 """
506                 This method does not expand old-style virtuals because it
507                 is restricted to returning matches for a single ${CATEGORY}/${PN}
508                 and old-style virual matches unreliable for that when querying
509                 multiple package databases. If necessary, old-style virtuals
510                 can be performed on atoms prior to calling this method.
511                 """
512                 cp = portage.dep_getkey(atom)
513                 if level == "match-all":
514                         matches = set()
515                         for db in self._dbs:
516                                 if hasattr(db, "xmatch"):
517                                         matches.update(db.xmatch(level, atom))
518                                 else:
519                                         matches.update(db.match(atom))
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "match-visible":
523                         matches = set()
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         matches.update(db.xmatch(level, atom))
527                                 else:
528                                         db_keys = list(db._aux_cache_keys)
529                                         for cpv in db.match(atom):
530                                                 metadata = izip(db_keys,
531                                                         db.aux_get(cpv, db_keys))
532                                                 if not self._visible(db, cpv, metadata):
533                                                         continue
534                                                 matches.add(cpv)
535                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536                         db._cpv_sort_ascending(result)
537                 elif level == "bestmatch-visible":
538                         result = None
539                         for db in self._dbs:
540                                 if hasattr(db, "xmatch"):
541                                         cpv = db.xmatch("bestmatch-visible", atom)
542                                         if not cpv or portage.cpv_getkey(cpv) != cp:
543                                                 continue
544                                         if not result or cpv == portage.best([cpv, result]):
545                                                 result = cpv
546                                 else:
547                                         db_keys = Package.metadata_keys
548                                         # break out of this loop with highest visible
549                                         # match, checked in descending order
550                                         for cpv in reversed(db.match(atom)):
551                                                 if portage.cpv_getkey(cpv) != cp:
552                                                         continue
553                                                 metadata = izip(db_keys,
554                                                         db.aux_get(cpv, db_keys))
555                                                 if not self._visible(db, cpv, metadata):
556                                                         continue
557                                                 if not result or cpv == portage.best([cpv, result]):
558                                                         result = cpv
559                                                 break
560                 else:
561                         raise NotImplementedError(level)
562                 return result
563
564         def execute(self,searchkey):
565                 """Performs the search for the supplied search key"""
566                 match_category = 0
567                 self.searchkey=searchkey
568                 self.packagematches = []
569                 if self.searchdesc:
570                         self.searchdesc=1
571                         self.matches = {"pkg":[], "desc":[], "set":[]}
572                 else:
573                         self.searchdesc=0
574                         self.matches = {"pkg":[], "set":[]}
575                 print "Searching...   ",
576
577                 regexsearch = False
578                 if self.searchkey.startswith('%'):
579                         regexsearch = True
580                         self.searchkey = self.searchkey[1:]
581                 if self.searchkey.startswith('@'):
582                         match_category = 1
583                         self.searchkey = self.searchkey[1:]
584                 if regexsearch:
585                         self.searchre=re.compile(self.searchkey,re.I)
586                 else:
587                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
588                 for package in self.portdb.cp_all():
589                         self.spinner.update()
590
591                         if match_category:
592                                 match_string  = package[:]
593                         else:
594                                 match_string  = package.split("/")[-1]
595
596                         masked=0
597                         if self.searchre.search(match_string):
598                                 if not self.portdb.xmatch("match-visible", package):
599                                         masked=1
600                                 self.matches["pkg"].append([package,masked])
601                         elif self.searchdesc: # DESCRIPTION searching
602                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
603                                 if not full_package:
604                                         #no match found; we don't want to query description
605                                         full_package = portage.best(
606                                                 self.portdb.xmatch("match-all", package))
607                                         if not full_package:
608                                                 continue
609                                         else:
610                                                 masked=1
611                                 try:
612                                         full_desc = self.portdb.aux_get(
613                                                 full_package, ["DESCRIPTION"])[0]
614                                 except KeyError:
615                                         print "emerge: search: aux_get() failed, skipping"
616                                         continue
617                                 if self.searchre.search(full_desc):
618                                         self.matches["desc"].append([full_package,masked])
619
620                 self.sdict = self.setconfig.getSets()
621                 for setname in self.sdict:
622                         self.spinner.update()
623                         if match_category:
624                                 match_string = setname
625                         else:
626                                 match_string = setname.split("/")[-1]
627                         
628                         if self.searchre.search(match_string):
629                                 self.matches["set"].append([setname, False])
630                         elif self.searchdesc:
631                                 if self.searchre.search(
632                                         self.sdict[setname].getMetadata("DESCRIPTION")):
633                                         self.matches["set"].append([setname, False])
634                         
635                 self.mlen=0
636                 for mtype in self.matches:
637                         self.matches[mtype].sort()
638                         self.mlen += len(self.matches[mtype])
639
640         def addCP(self, cp):
641                 if not self.portdb.xmatch("match-all", cp):
642                         return
643                 masked = 0
644                 if not self.portdb.xmatch("bestmatch-visible", cp):
645                         masked = 1
646                 self.matches["pkg"].append([cp, masked])
647                 self.mlen += 1
648
649         def output(self):
650                 """Outputs the results of the search."""
651                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
652                 print "[ Applications found : "+white(str(self.mlen))+" ]"
653                 print " "
654                 vardb = self.vartree.dbapi
655                 for mtype in self.matches:
656                         for match,masked in self.matches[mtype]:
657                                 full_package = None
658                                 if mtype == "pkg":
659                                         catpack = match
660                                         full_package = self.portdb.xmatch(
661                                                 "bestmatch-visible", match)
662                                         if not full_package:
663                                                 #no match found; we don't want to query description
664                                                 masked=1
665                                                 full_package = portage.best(
666                                                         self.portdb.xmatch("match-all",match))
667                                 elif mtype == "desc":
668                                         full_package = match
669                                         match        = portage.cpv_getkey(match)
670                                 elif mtype == "set":
671                                         print green("*")+"  "+white(match)
672                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
673                                         print
674                                 if full_package:
675                                         try:
676                                                 desc, homepage, license = self.portdb.aux_get(
677                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
678                                         except KeyError:
679                                                 print "emerge: search: aux_get() failed, skipping"
680                                                 continue
681                                         if masked:
682                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
683                                         else:
684                                                 print green("*")+"  "+white(match)
685                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
686
687                                         mysum = [0,0]
688                                         file_size_str = None
689                                         mycat = match.split("/")[0]
690                                         mypkg = match.split("/")[1]
691                                         mycpv = match + "-" + myversion
692                                         myebuild = self.portdb.findname(mycpv)
693                                         if myebuild:
694                                                 pkgdir = os.path.dirname(myebuild)
695                                                 from portage import manifest
696                                                 mf = manifest.Manifest(
697                                                         pkgdir, self.settings["DISTDIR"])
698                                                 try:
699                                                         uri_map = self.portdb.getFetchMap(mycpv)
700                                                 except portage.exception.InvalidDependString, e:
701                                                         file_size_str = "Unknown (%s)" % (e,)
702                                                         del e
703                                                 else:
704                                                         try:
705                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
706                                                         except KeyError, e:
707                                                                 file_size_str = "Unknown (missing " + \
708                                                                         "digest for %s)" % (e,)
709                                                                 del e
710
711                                         available = False
712                                         for db in self._dbs:
713                                                 if db is not vardb and \
714                                                         db.cpv_exists(mycpv):
715                                                         available = True
716                                                         if not myebuild and hasattr(db, "bintree"):
717                                                                 myebuild = db.bintree.getname(mycpv)
718                                                                 try:
719                                                                         mysum[0] = os.stat(myebuild).st_size
720                                                                 except OSError:
721                                                                         myebuild = None
722                                                         break
723
724                                         if myebuild and file_size_str is None:
725                                                 mystr = str(mysum[0] / 1024)
726                                                 mycount = len(mystr)
727                                                 while (mycount > 3):
728                                                         mycount -= 3
729                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
730                                                 file_size_str = mystr + " kB"
731
732                                         if self.verbose:
733                                                 if available:
734                                                         print "     ", darkgreen("Latest version available:"),myversion
735                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
736                                                 if myebuild:
737                                                         print "      %s %s" % \
738                                                                 (darkgreen("Size of files:"), file_size_str)
739                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
740                                                 print "     ", darkgreen("Description:")+"  ",desc
741                                                 print "     ", darkgreen("License:")+"      ",license
742                                                 print
743         #
744         # private interface
745         #
746         def getInstallationStatus(self,package):
747                 installed_package = self.vartree.dep_bestmatch(package)
748                 result = ""
749                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
750                 if len(version) > 0:
751                         result = darkgreen("Latest version installed:")+" "+version
752                 else:
753                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
754                 return result
755
756         def getVersion(self,full_package,detail):
757                 if len(full_package) > 1:
758                         package_parts = portage.catpkgsplit(full_package)
759                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
760                                 result = package_parts[2]+ "-" + package_parts[3]
761                         else:
762                                 result = package_parts[2]
763                 else:
764                         result = ""
765                 return result
766
767 class RootConfig(object):
768         """This is used internally by depgraph to track information about a
769         particular $ROOT."""
770
771         pkg_tree_map = {
772                 "ebuild"    : "porttree",
773                 "binary"    : "bintree",
774                 "installed" : "vartree"
775         }
776
777         tree_pkg_map = {}
778         for k, v in pkg_tree_map.iteritems():
779                 tree_pkg_map[v] = k
780
781         def __init__(self, settings, trees, setconfig):
782                 self.trees = trees
783                 self.settings = settings
784                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
785                 self.root = self.settings["ROOT"]
786                 self.setconfig = setconfig
787                 self.sets = self.setconfig.getSets()
788                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
789
790 def create_world_atom(pkg, args_set, root_config):
791         """Create a new atom for the world file if one does not exist.  If the
792         argument atom is precise enough to identify a specific slot then a slot
793         atom will be returned. Atoms that are in the system set may also be stored
794         in world since system atoms can only match one slot while world atoms can
795         be greedy with respect to slots.  Unslotted system packages will not be
796         stored in world."""
797
798         arg_atom = args_set.findAtomForPackage(pkg)
799         if not arg_atom:
800                 return None
801         cp = portage.dep_getkey(arg_atom)
802         new_world_atom = cp
803         sets = root_config.sets
804         portdb = root_config.trees["porttree"].dbapi
805         vardb = root_config.trees["vartree"].dbapi
806         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
807                 for cpv in portdb.match(cp))
808         slotted = len(available_slots) > 1 or \
809                 (len(available_slots) == 1 and "0" not in available_slots)
810         if not slotted:
811                 # check the vdb in case this is multislot
812                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
813                         for cpv in vardb.match(cp))
814                 slotted = len(available_slots) > 1 or \
815                         (len(available_slots) == 1 and "0" not in available_slots)
816         if slotted and arg_atom != cp:
817                 # If the user gave a specific atom, store it as a
818                 # slot atom in the world file.
819                 slot_atom = pkg.slot_atom
820
821                 # For USE=multislot, there are a couple of cases to
822                 # handle here:
823                 #
824                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
825                 #    unknown value, so just record an unslotted atom.
826                 #
827                 # 2) SLOT comes from an installed package and there is no
828                 #    matching SLOT in the portage tree.
829                 #
830                 # Make sure that the slot atom is available in either the
831                 # portdb or the vardb, since otherwise the user certainly
832                 # doesn't want the SLOT atom recorded in the world file
833                 # (case 1 above).  If it's only available in the vardb,
834                 # the user may be trying to prevent a USE=multislot
835                 # package from being removed by --depclean (case 2 above).
836
837                 mydb = portdb
838                 if not portdb.match(slot_atom):
839                         # SLOT seems to come from an installed multislot package
840                         mydb = vardb
841                 # If there is no installed package matching the SLOT atom,
842                 # it probably changed SLOT spontaneously due to USE=multislot,
843                 # so just record an unslotted atom.
844                 if vardb.match(slot_atom):
845                         # Now verify that the argument is precise
846                         # enough to identify a specific slot.
847                         matches = mydb.match(arg_atom)
848                         matched_slots = set()
849                         for cpv in matches:
850                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
851                         if len(matched_slots) == 1:
852                                 new_world_atom = slot_atom
853
854         if new_world_atom == sets["world"].findAtomForPackage(pkg):
855                 # Both atoms would be identical, so there's nothing to add.
856                 return None
857         if not slotted:
858                 # Unlike world atoms, system atoms are not greedy for slots, so they
859                 # can't be safely excluded from world if they are slotted.
860                 system_atom = sets["system"].findAtomForPackage(pkg)
861                 if system_atom:
862                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
863                                 return None
864                         # System virtuals aren't safe to exclude from world since they can
865                         # match multiple old-style virtuals but only one of them will be
866                         # pulled in by update or depclean.
867                         providers = portdb.mysettings.getvirtuals().get(
868                                 portage.dep_getkey(system_atom))
869                         if providers and len(providers) == 1 and providers[0] == cp:
870                                 return None
871         return new_world_atom
872
873 def filter_iuse_defaults(iuse):
874         for flag in iuse:
875                 if flag.startswith("+") or flag.startswith("-"):
876                         yield flag[1:]
877                 else:
878                         yield flag
879
880 class SlotObject(object):
881         __slots__ = ("__weakref__",)
882
883         def __init__(self, **kwargs):
884                 classes = [self.__class__]
885                 while classes:
886                         c = classes.pop()
887                         if c is SlotObject:
888                                 continue
889                         classes.extend(c.__bases__)
890                         slots = getattr(c, "__slots__", None)
891                         if not slots:
892                                 continue
893                         for myattr in slots:
894                                 myvalue = kwargs.get(myattr, None)
895                                 setattr(self, myattr, myvalue)
896
897         def copy(self):
898                 """
899                 Create a new instance and copy all attributes
900                 defined from __slots__ (including those from
901                 inherited classes).
902                 """
903                 obj = self.__class__()
904
905                 classes = [self.__class__]
906                 while classes:
907                         c = classes.pop()
908                         if c is SlotObject:
909                                 continue
910                         classes.extend(c.__bases__)
911                         slots = getattr(c, "__slots__", None)
912                         if not slots:
913                                 continue
914                         for myattr in slots:
915                                 setattr(obj, myattr, getattr(self, myattr))
916
917                 return obj
918
919 class AbstractDepPriority(SlotObject):
920         __slots__ = ("buildtime", "runtime", "runtime_post")
921
922         def __lt__(self, other):
923                 return self.__int__() < other
924
925         def __le__(self, other):
926                 return self.__int__() <= other
927
928         def __eq__(self, other):
929                 return self.__int__() == other
930
931         def __ne__(self, other):
932                 return self.__int__() != other
933
934         def __gt__(self, other):
935                 return self.__int__() > other
936
937         def __ge__(self, other):
938                 return self.__int__() >= other
939
940         def copy(self):
941                 import copy
942                 return copy.copy(self)
943
944 class DepPriority(AbstractDepPriority):
945         """
946                 This class generates an integer priority level based of various
947                 attributes of the dependency relationship.  Attributes can be assigned
948                 at any time and the new integer value will be generated on calls to the
949                 __int__() method.  Rich comparison operators are supported.
950
951                 The boolean attributes that affect the integer value are "satisfied",
952                 "buildtime", "runtime", and "system".  Various combinations of
953                 attributes lead to the following priority levels:
954
955                 Combination of properties           Priority  Category
956
957                 not satisfied and buildtime            0       HARD
958                 not satisfied and runtime             -1       MEDIUM
959                 not satisfied and runtime_post        -2       MEDIUM_SOFT
960                 satisfied and buildtime and rebuild   -3       SOFT
961                 satisfied and buildtime               -4       SOFT
962                 satisfied and runtime                 -5       SOFT
963                 satisfied and runtime_post            -6       SOFT
964                 (none of the above)                   -6       SOFT
965
966                 Several integer constants are defined for categorization of priority
967                 levels:
968
969                 MEDIUM   The upper boundary for medium dependencies.
970                 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
971                 SOFT     The upper boundary for soft dependencies.
972                 MIN      The lower boundary for soft dependencies.
973         """
974         __slots__ = ("satisfied", "rebuild")
975         MEDIUM = -1
976         MEDIUM_SOFT = -2
977         SOFT   = -3
978         MIN    = -6
979
980         def __int__(self):
981                 if not self.satisfied:
982                         if self.buildtime:
983                                 return 0
984                         if self.runtime:
985                                 return -1
986                         if self.runtime_post:
987                                 return -2
988                 if self.buildtime:
989                         if self.rebuild:
990                                 return -3
991                         return -4
992                 if self.runtime:
993                         return -5
994                 if self.runtime_post:
995                         return -6
996                 return -6
997
998         def __str__(self):
999                 myvalue = self.__int__()
1000                 if myvalue > self.MEDIUM:
1001                         return "hard"
1002                 if myvalue > self.MEDIUM_SOFT:
1003                         return "medium"
1004                 if myvalue > self.SOFT:
1005                         return "medium-soft"
1006                 return "soft"
1007
1008 class BlockerDepPriority(DepPriority):
1009         __slots__ = ()
1010         def __int__(self):
1011                 return 0
1012
1013 BlockerDepPriority.instance = BlockerDepPriority()
1014
1015 class UnmergeDepPriority(AbstractDepPriority):
1016         __slots__ = ("satisfied",)
1017         """
1018         Combination of properties           Priority  Category
1019
1020         runtime                                0       HARD
1021         runtime_post                          -1       HARD
1022         buildtime                             -2       SOFT
1023         (none of the above)                   -2       SOFT
1024         """
1025
1026         MAX    =  0
1027         SOFT   = -2
1028         MIN    = -2
1029
1030         def __int__(self):
1031                 if self.runtime:
1032                         return 0
1033                 if self.runtime_post:
1034                         return -1
1035                 if self.buildtime:
1036                         return -2
1037                 return -2
1038
1039         def __str__(self):
1040                 myvalue = self.__int__()
1041                 if myvalue > self.SOFT:
1042                         return "hard"
1043                 return "soft"
1044
1045 class FakeVartree(portage.vartree):
1046         """This is implements an in-memory copy of a vartree instance that provides
1047         all the interfaces required for use by the depgraph.  The vardb is locked
1048         during the constructor call just long enough to read a copy of the
1049         installed package information.  This allows the depgraph to do it's
1050         dependency calculations without holding a lock on the vardb.  It also
1051         allows things like vardb global updates to be done in memory so that the
1052         user doesn't necessarily need write access to the vardb in cases where
1053         global updates are necessary (updates are performed when necessary if there
1054         is not a matching ebuild in the tree)."""
1055         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1056                 self._root_config = root_config
1057                 if pkg_cache is None:
1058                         pkg_cache = {}
1059                 real_vartree = root_config.trees["vartree"]
1060                 portdb = root_config.trees["porttree"].dbapi
1061                 self.root = real_vartree.root
1062                 self.settings = real_vartree.settings
1063                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1064                 self._pkg_cache = pkg_cache
1065                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1066                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1067                 try:
1068                         # At least the parent needs to exist for the lock file.
1069                         portage.util.ensure_dirs(vdb_path)
1070                 except portage.exception.PortageException:
1071                         pass
1072                 vdb_lock = None
1073                 try:
1074                         if acquire_lock and os.access(vdb_path, os.W_OK):
1075                                 vdb_lock = portage.locks.lockdir(vdb_path)
1076                         real_dbapi = real_vartree.dbapi
1077                         slot_counters = {}
1078                         for cpv in real_dbapi.cpv_all():
1079                                 cache_key = ("installed", self.root, cpv, "nomerge")
1080                                 pkg = self._pkg_cache.get(cache_key)
1081                                 if pkg is not None:
1082                                         metadata = pkg.metadata
1083                                 else:
1084                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1085                                 myslot = metadata["SLOT"]
1086                                 mycp = portage.dep_getkey(cpv)
1087                                 myslot_atom = "%s:%s" % (mycp, myslot)
1088                                 try:
1089                                         mycounter = long(metadata["COUNTER"])
1090                                 except ValueError:
1091                                         mycounter = 0
1092                                         metadata["COUNTER"] = str(mycounter)
1093                                 other_counter = slot_counters.get(myslot_atom, None)
1094                                 if other_counter is not None:
1095                                         if other_counter > mycounter:
1096                                                 continue
1097                                 slot_counters[myslot_atom] = mycounter
1098                                 if pkg is None:
1099                                         pkg = Package(built=True, cpv=cpv,
1100                                                 installed=True, metadata=metadata,
1101                                                 root_config=root_config, type_name="installed")
1102                                 self._pkg_cache[pkg] = pkg
1103                                 self.dbapi.cpv_inject(pkg)
1104                         real_dbapi.flush_cache()
1105                 finally:
1106                         if vdb_lock:
1107                                 portage.locks.unlockdir(vdb_lock)
1108                 # Populate the old-style virtuals using the cached values.
1109                 if not self.settings.treeVirtuals:
1110                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1111                                 portage.getCPFromCPV, self.get_all_provides())
1112
1113                 # Intialize variables needed for lazy cache pulls of the live ebuild
1114                 # metadata.  This ensures that the vardb lock is released ASAP, without
1115                 # being delayed in case cache generation is triggered.
1116                 self._aux_get = self.dbapi.aux_get
1117                 self.dbapi.aux_get = self._aux_get_wrapper
1118                 self._match = self.dbapi.match
1119                 self.dbapi.match = self._match_wrapper
1120                 self._aux_get_history = set()
1121                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1122                 self._portdb = portdb
1123                 self._global_updates = None
1124
1125         def _match_wrapper(self, cpv, use_cache=1):
1126                 """
1127                 Make sure the metadata in Package instances gets updated for any
1128                 cpv that is returned from a match() call, since the metadata can
1129                 be accessed directly from the Package instance instead of via
1130                 aux_get().
1131                 """
1132                 matches = self._match(cpv, use_cache=use_cache)
1133                 for cpv in matches:
1134                         if cpv in self._aux_get_history:
1135                                 continue
1136                         self._aux_get_wrapper(cpv, [])
1137                 return matches
1138
1139         def _aux_get_wrapper(self, pkg, wants):
1140                 if pkg in self._aux_get_history:
1141                         return self._aux_get(pkg, wants)
1142                 self._aux_get_history.add(pkg)
1143                 try:
1144                         # Use the live ebuild metadata if possible.
1145                         live_metadata = dict(izip(self._portdb_keys,
1146                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1147                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1148                                 raise KeyError(pkg)
1149                         self.dbapi.aux_update(pkg, live_metadata)
1150                 except (KeyError, portage.exception.PortageException):
1151                         if self._global_updates is None:
1152                                 self._global_updates = \
1153                                         grab_global_updates(self._portdb.porttree_root)
1154                         perform_global_updates(
1155                                 pkg, self.dbapi, self._global_updates)
1156                 return self._aux_get(pkg, wants)
1157
1158         def sync(self, acquire_lock=1):
1159                 """
1160                 Call this method to synchronize state with the real vardb
1161                 after one or more packages may have been installed or
1162                 uninstalled.
1163                 """
1164                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1165                 try:
1166                         # At least the parent needs to exist for the lock file.
1167                         portage.util.ensure_dirs(vdb_path)
1168                 except portage.exception.PortageException:
1169                         pass
1170                 vdb_lock = None
1171                 try:
1172                         if acquire_lock and os.access(vdb_path, os.W_OK):
1173                                 vdb_lock = portage.locks.lockdir(vdb_path)
1174                         self._sync()
1175                 finally:
1176                         if vdb_lock:
1177                                 portage.locks.unlockdir(vdb_lock)
1178
1179         def _sync(self):
1180
1181                 real_vardb = self._root_config.trees["vartree"].dbapi
1182                 current_cpv_set = frozenset(real_vardb.cpv_all())
1183                 pkg_vardb = self.dbapi
1184                 aux_get_history = self._aux_get_history
1185
1186                 # Remove any packages that have been uninstalled.
1187                 for pkg in list(pkg_vardb):
1188                         if pkg.cpv not in current_cpv_set:
1189                                 pkg_vardb.cpv_remove(pkg)
1190                                 aux_get_history.discard(pkg.cpv)
1191
1192                 # Validate counters and timestamps.
1193                 slot_counters = {}
1194                 root = self.root
1195                 validation_keys = ["COUNTER", "_mtime_"]
1196                 for cpv in current_cpv_set:
1197
1198                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1199                         pkg = pkg_vardb.get(pkg_hash_key)
1200                         if pkg is not None:
1201                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1202
1203                                 if counter != pkg.metadata["COUNTER"] or \
1204                                         mtime != pkg.mtime:
1205                                         pkg_vardb.cpv_remove(pkg)
1206                                         aux_get_history.discard(pkg.cpv)
1207                                         pkg = None
1208
1209                         if pkg is None:
1210                                 pkg = self._pkg(cpv)
1211
1212                         other_counter = slot_counters.get(pkg.slot_atom)
1213                         if other_counter is not None:
1214                                 if other_counter > pkg.counter:
1215                                         continue
1216
1217                         slot_counters[pkg.slot_atom] = pkg.counter
1218                         pkg_vardb.cpv_inject(pkg)
1219
1220                 real_vardb.flush_cache()
1221
1222         def _pkg(self, cpv):
1223                 root_config = self._root_config
1224                 real_vardb = root_config.trees["vartree"].dbapi
1225                 db_keys = list(real_vardb._aux_cache_keys)
1226                 pkg = Package(cpv=cpv, installed=True,
1227                         metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1228                         root_config=root_config,
1229                         type_name="installed")
1230                 return pkg
1231
1232 def grab_global_updates(portdir):
1233         from portage.update import grab_updates, parse_updates
1234         updpath = os.path.join(portdir, "profiles", "updates")
1235         try:
1236                 rawupdates = grab_updates(updpath)
1237         except portage.exception.DirectoryNotFound:
1238                 rawupdates = []
1239         upd_commands = []
1240         for mykey, mystat, mycontent in rawupdates:
1241                 commands, errors = parse_updates(mycontent)
1242                 upd_commands.extend(commands)
1243         return upd_commands
1244
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246         from portage.update import update_dbentries
1247         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249         updates = update_dbentries(mycommands, aux_dict)
1250         if updates:
1251                 mydb.aux_update(mycpv, updates)
1252
1253 def visible(pkgsettings, pkg):
1254         """
1255         Check if a package is visible. This can raise an InvalidDependString
1256         exception if LICENSE is invalid.
1257         TODO: optionally generate a list of masking reasons
1258         @rtype: Boolean
1259         @returns: True if the package is visible, False otherwise.
1260         """
1261         if not pkg.metadata["SLOT"]:
1262                 return False
1263         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264                 if not pkgsettings._accept_chost(pkg):
1265                         return False
1266         eapi = pkg.metadata["EAPI"]
1267         if not portage.eapi_is_supported(eapi):
1268                 return False
1269         if not pkg.installed:
1270                 if portage._eapi_is_deprecated(eapi):
1271                         return False
1272                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1273                         return False
1274         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1275                 return False
1276         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1277                 return False
1278         try:
1279                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1280                         return False
1281         except portage.exception.InvalidDependString:
1282                 return False
1283         return True
1284
1285 def get_masking_status(pkg, pkgsettings, root_config):
1286
1287         mreasons = portage.getmaskingstatus(
1288                 pkg, settings=pkgsettings,
1289                 portdb=root_config.trees["porttree"].dbapi)
1290
1291         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292                 if not pkgsettings._accept_chost(pkg):
1293                         mreasons.append("CHOST: %s" % \
1294                                 pkg.metadata["CHOST"])
1295
1296         if not pkg.metadata["SLOT"]:
1297                 mreasons.append("invalid: SLOT is undefined")
1298
1299         return mreasons
1300
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302         db, pkg_type, built, installed, db_keys):
1303         eapi_masked = False
1304         try:
1305                 metadata = dict(izip(db_keys,
1306                         db.aux_get(cpv, db_keys)))
1307         except KeyError:
1308                 metadata = None
1309         if metadata and not built:
1310                 pkgsettings.setcpv(cpv, mydb=metadata)
1311                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312         if metadata is None:
1313                 mreasons = ["corruption"]
1314         else:
1315                 pkg = Package(type_name=pkg_type, root_config=root_config,
1316                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1317                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318         return metadata, mreasons
1319
1320 def show_masked_packages(masked_packages):
1321         shown_licenses = set()
1322         shown_comments = set()
1323         # Maybe there is both an ebuild and a binary. Only
1324         # show one of them to avoid redundant appearance.
1325         shown_cpvs = set()
1326         have_eapi_mask = False
1327         for (root_config, pkgsettings, cpv,
1328                 metadata, mreasons) in masked_packages:
1329                 if cpv in shown_cpvs:
1330                         continue
1331                 shown_cpvs.add(cpv)
1332                 comment, filename = None, None
1333                 if "package.mask" in mreasons:
1334                         comment, filename = \
1335                                 portage.getmaskingreason(
1336                                 cpv, metadata=metadata,
1337                                 settings=pkgsettings,
1338                                 portdb=root_config.trees["porttree"].dbapi,
1339                                 return_location=True)
1340                 missing_licenses = []
1341                 if metadata:
1342                         if not portage.eapi_is_supported(metadata["EAPI"]):
1343                                 have_eapi_mask = True
1344                         try:
1345                                 missing_licenses = \
1346                                         pkgsettings._getMissingLicenses(
1347                                                 cpv, metadata)
1348                         except portage.exception.InvalidDependString:
1349                                 # This will have already been reported
1350                                 # above via mreasons.
1351                                 pass
1352
1353                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354                 if comment and comment not in shown_comments:
1355                         print filename+":"
1356                         print comment
1357                         shown_comments.add(comment)
1358                 portdb = root_config.trees["porttree"].dbapi
1359                 for l in missing_licenses:
1360                         l_path = portdb.findLicensePath(l)
1361                         if l in shown_licenses:
1362                                 continue
1363                         msg = ("A copy of the '%s' license" + \
1364                         " is located at '%s'.") % (l, l_path)
1365                         print msg
1366                         print
1367                         shown_licenses.add(l)
1368         return have_eapi_mask
1369
1370 class Task(SlotObject):
1371         __slots__ = ("_hash_key", "_hash_value")
1372
1373         def _get_hash_key(self):
1374                 hash_key = getattr(self, "_hash_key", None)
1375                 if hash_key is None:
1376                         raise NotImplementedError(self)
1377                 return hash_key
1378
1379         def __eq__(self, other):
1380                 return self._get_hash_key() == other
1381
1382         def __ne__(self, other):
1383                 return self._get_hash_key() != other
1384
1385         def __hash__(self):
1386                 hash_value = getattr(self, "_hash_value", None)
1387                 if hash_value is None:
1388                         self._hash_value = hash(self._get_hash_key())
1389                 return self._hash_value
1390
1391         def __len__(self):
1392                 return len(self._get_hash_key())
1393
1394         def __getitem__(self, key):
1395                 return self._get_hash_key()[key]
1396
1397         def __iter__(self):
1398                 return iter(self._get_hash_key())
1399
1400         def __contains__(self, key):
1401                 return key in self._get_hash_key()
1402
1403         def __str__(self):
1404                 return str(self._get_hash_key())
1405
1406 class Blocker(Task):
1407
1408         __hash__ = Task.__hash__
1409         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1410
1411         def __init__(self, **kwargs):
1412                 Task.__init__(self, **kwargs)
1413                 self.cp = portage.dep_getkey(self.atom)
1414
1415         def _get_hash_key(self):
1416                 hash_key = getattr(self, "_hash_key", None)
1417                 if hash_key is None:
1418                         self._hash_key = \
1419                                 ("blocks", self.root, self.atom, self.eapi)
1420                 return self._hash_key
1421
1422 class Package(Task):
1423
1424         __hash__ = Task.__hash__
1425         __slots__ = ("built", "cpv", "depth",
1426                 "installed", "metadata", "onlydeps", "operation",
1427                 "root_config", "type_name",
1428                 "category", "counter", "cp", "cpv_split",
1429                 "inherited", "iuse", "mtime",
1430                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1431
1432         metadata_keys = [
1433                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434                 "INHERITED", "IUSE", "KEYWORDS",
1435                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1437
1438         def __init__(self, **kwargs):
1439                 Task.__init__(self, **kwargs)
1440                 self.root = self.root_config.root
1441                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442                 self.cp = portage.cpv_getkey(self.cpv)
1443                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444                 self.category, self.pf = portage.catsplit(self.cpv)
1445                 self.cpv_split = portage.catpkgsplit(self.cpv)
1446                 self.pv_split = self.cpv_split[1:]
1447
1448         class _use(object):
1449
1450                 __slots__ = ("__weakref__", "enabled")
1451
1452                 def __init__(self, use):
1453                         self.enabled = frozenset(use)
1454
1455         class _iuse(object):
1456
1457                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1458
1459                 def __init__(self, tokens, iuse_implicit):
1460                         self.tokens = tuple(tokens)
1461                         self.iuse_implicit = iuse_implicit
1462                         enabled = []
1463                         disabled = []
1464                         other = []
1465                         for x in tokens:
1466                                 prefix = x[:1]
1467                                 if prefix == "+":
1468                                         enabled.append(x[1:])
1469                                 elif prefix == "-":
1470                                         disabled.append(x[1:])
1471                                 else:
1472                                         other.append(x)
1473                         self.enabled = frozenset(enabled)
1474                         self.disabled = frozenset(disabled)
1475                         self.all = frozenset(chain(enabled, disabled, other))
1476
1477                 def __getattribute__(self, name):
1478                         if name == "regex":
1479                                 try:
1480                                         return object.__getattribute__(self, "regex")
1481                                 except AttributeError:
1482                                         all = object.__getattribute__(self, "all")
1483                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484                                         # Escape anything except ".*" which is supposed
1485                                         # to pass through from _get_implicit_iuse()
1486                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487                                         regex = "^(%s)$" % "|".join(regex)
1488                                         regex = regex.replace("\\.\\*", ".*")
1489                                         self.regex = re.compile(regex)
1490                         return object.__getattribute__(self, name)
1491
1492         def _get_hash_key(self):
1493                 hash_key = getattr(self, "_hash_key", None)
1494                 if hash_key is None:
1495                         if self.operation is None:
1496                                 self.operation = "merge"
1497                                 if self.onlydeps or self.installed:
1498                                         self.operation = "nomerge"
1499                         self._hash_key = \
1500                                 (self.type_name, self.root, self.cpv, self.operation)
1501                 return self._hash_key
1502
1503         def __cmp__(self, other):
1504                 if self > other:
1505                         return 1
1506                 elif self < other:
1507                         return -1
1508                 return 0
1509
1510         def __lt__(self, other):
1511                 if other.cp != self.cp:
1512                         return False
1513                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1514                         return True
1515                 return False
1516
1517         def __le__(self, other):
1518                 if other.cp != self.cp:
1519                         return False
1520                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1521                         return True
1522                 return False
1523
1524         def __gt__(self, other):
1525                 if other.cp != self.cp:
1526                         return False
1527                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1528                         return True
1529                 return False
1530
1531         def __ge__(self, other):
1532                 if other.cp != self.cp:
1533                         return False
1534                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1535                         return True
1536                 return False
1537
1538 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1539         if not x.startswith("UNUSED_"))
1540 _all_metadata_keys.discard("CDEPEND")
1541 _all_metadata_keys.update(Package.metadata_keys)
1542
1543 from portage.cache.mappings import slot_dict_class
1544 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1545
1546 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1547         """
1548         Detect metadata updates and synchronize Package attributes.
1549         """
1550
1551         __slots__ = ("_pkg",)
1552         _wrapped_keys = frozenset(
1553                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1554
1555         def __init__(self, pkg, metadata):
1556                 _PackageMetadataWrapperBase.__init__(self)
1557                 self._pkg = pkg
1558                 self.update(metadata)
1559
1560         def __setitem__(self, k, v):
1561                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1562                 if k in self._wrapped_keys:
1563                         getattr(self, "_set_" + k.lower())(k, v)
1564
1565         def _set_inherited(self, k, v):
1566                 if isinstance(v, basestring):
1567                         v = frozenset(v.split())
1568                 self._pkg.inherited = v
1569
1570         def _set_iuse(self, k, v):
1571                 self._pkg.iuse = self._pkg._iuse(
1572                         v.split(), self._pkg.root_config.iuse_implicit)
1573
1574         def _set_slot(self, k, v):
1575                 self._pkg.slot = v
1576
1577         def _set_use(self, k, v):
1578                 self._pkg.use = self._pkg._use(v.split())
1579
1580         def _set_counter(self, k, v):
1581                 if isinstance(v, basestring):
1582                         try:
1583                                 v = int(v.strip())
1584                         except ValueError:
1585                                 v = 0
1586                 self._pkg.counter = v
1587
1588         def _set__mtime_(self, k, v):
1589                 if isinstance(v, basestring):
1590                         try:
1591                                 v = float(v.strip())
1592                         except ValueError:
1593                                 v = 0
1594                 self._pkg.mtime = v
1595
1596 class EbuildFetchonly(SlotObject):
1597
1598         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1599
1600         def execute(self):
1601                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1602                 # ensuring sane $PWD (bug #239560) and storing elog
1603                 # messages. Use a private temp directory, in order
1604                 # to avoid locking the main one.
1605                 settings = self.settings
1606                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1607                 from tempfile import mkdtemp
1608                 try:
1609                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1610                 except OSError, e:
1611                         if e.errno != portage.exception.PermissionDenied.errno:
1612                                 raise
1613                         raise portage.exception.PermissionDenied(global_tmpdir)
1614                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1615                 settings.backup_changes("PORTAGE_TMPDIR")
1616                 try:
1617                         retval = self._execute()
1618                 finally:
1619                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1620                         settings.backup_changes("PORTAGE_TMPDIR")
1621                         shutil.rmtree(private_tmpdir)
1622                 return retval
1623
1624         def _execute(self):
1625                 settings = self.settings
1626                 pkg = self.pkg
1627                 root_config = pkg.root_config
1628                 portdb = root_config.trees["porttree"].dbapi
1629                 ebuild_path = portdb.findname(pkg.cpv)
1630                 settings.setcpv(pkg)
1631                 debug = settings.get("PORTAGE_DEBUG") == "1"
1632                 use_cache = 1 # always true
1633                 portage.doebuild_environment(ebuild_path, "fetch",
1634                         root_config.root, settings, debug, use_cache, portdb)
1635                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1636
1637                 retval = portage.doebuild(ebuild_path, "fetch",
1638                         self.settings["ROOT"], self.settings, debug=debug,
1639                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1640                         mydbapi=portdb, tree="porttree")
1641
1642                 if retval != os.EX_OK:
1643                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1644                         eerror(msg, phase="unpack", key=pkg.cpv)
1645
1646                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1647                 return retval
1648
1649 class AsynchronousTask(SlotObject):
1650         """
1651         Subclasses override _wait() and _poll() so that calls
1652         to public methods can be wrapped for implementing
1653         hooks such as exit listener notification.
1654
1655         Sublasses should call self.wait() to notify exit listeners after
1656         the task is complete and self.returncode has been set.
1657         """
1658
1659         __slots__ = ("background", "cancelled", "returncode") + \
1660                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1661
1662         def start(self):
1663                 """
1664                 Start an asynchronous task and then return as soon as possible.
1665                 """
1666                 self._start()
1667                 self._start_hook()
1668
1669         def _start(self):
1670                 raise NotImplementedError(self)
1671
1672         def isAlive(self):
1673                 return self.returncode is None
1674
1675         def poll(self):
1676                 self._wait_hook()
1677                 return self._poll()
1678
1679         def _poll(self):
1680                 return self.returncode
1681
1682         def wait(self):
1683                 if self.returncode is None:
1684                         self._wait()
1685                 self._wait_hook()
1686                 return self.returncode
1687
1688         def _wait(self):
1689                 return self.returncode
1690
1691         def cancel(self):
1692                 self.cancelled = True
1693                 self.wait()
1694
1695         def addStartListener(self, f):
1696                 """
1697                 The function will be called with one argument, a reference to self.
1698                 """
1699                 if self._start_listeners is None:
1700                         self._start_listeners = []
1701                 self._start_listeners.append(f)
1702
1703         def removeStartListener(self, f):
1704                 if self._start_listeners is None:
1705                         return
1706                 self._start_listeners.remove(f)
1707
1708         def _start_hook(self):
1709                 if self._start_listeners is not None:
1710                         start_listeners = self._start_listeners
1711                         self._start_listeners = None
1712
1713                         for f in start_listeners:
1714                                 f(self)
1715
1716         def addExitListener(self, f):
1717                 """
1718                 The function will be called with one argument, a reference to self.
1719                 """
1720                 if self._exit_listeners is None:
1721                         self._exit_listeners = []
1722                 self._exit_listeners.append(f)
1723
1724         def removeExitListener(self, f):
1725                 if self._exit_listeners is None:
1726                         if self._exit_listener_stack is not None:
1727                                 self._exit_listener_stack.remove(f)
1728                         return
1729                 self._exit_listeners.remove(f)
1730
1731         def _wait_hook(self):
1732                 """
1733                 Call this method after the task completes, just before returning
1734                 the returncode from wait() or poll(). This hook is
1735                 used to trigger exit listeners when the returncode first
1736                 becomes available.
1737                 """
1738                 if self.returncode is not None and \
1739                         self._exit_listeners is not None:
1740
1741                         # This prevents recursion, in case one of the
1742                         # exit handlers triggers this method again by
1743                         # calling wait(). Use a stack that gives
1744                         # removeExitListener() an opportunity to consume
1745                         # listeners from the stack, before they can get
1746                         # called below. This is necessary because a call
1747                         # to one exit listener may result in a call to
1748                         # removeExitListener() for another listener on
1749                         # the stack. That listener needs to be removed
1750                         # from the stack since it would be inconsistent
1751                         # to call it after it has been been passed into
1752                         # removeExitListener().
1753                         self._exit_listener_stack = self._exit_listeners
1754                         self._exit_listeners = None
1755
1756                         self._exit_listener_stack.reverse()
1757                         while self._exit_listener_stack:
1758                                 self._exit_listener_stack.pop()(self)
1759
1760 class PipeReader(AsynchronousTask):
1761
1762         """
1763         Reads output from one or more files and saves it in memory,
1764         for retrieval via the getvalue() method. This is driven by
1765         the scheduler's poll() loop, so it runs entirely within the
1766         current process.
1767         """
1768
1769         __slots__ = ("input_files", "scheduler",) + \
1770                 ("pid", "_read_data", "_registered", "_reg_ids")
1771
1772         _bufsize = 4096
1773
1774         def _start(self):
1775                 self._reg_ids = set()
1776                 self._read_data = []
1777                 for k, f in self.input_files.iteritems():
1778                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1779                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1780                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1781                                 PollConstants.POLLIN, self._output_handler))
1782                 self._registered = True
1783
1784         def isAlive(self):
1785                 return self._registered
1786
1787         def _wait(self):
1788                 if self.returncode is not None:
1789                         return self.returncode
1790
1791                 if self._registered:
1792                         self.scheduler.schedule(self._reg_ids)
1793                         self._unregister()
1794
1795                 self.returncode = os.EX_OK
1796                 return self.returncode
1797
1798         def getvalue(self):
1799                 """Retrieve the entire contents"""
1800                 return "".join(self._read_data)
1801
1802         def close(self):
1803                 """Free the memory buffer."""
1804                 self._read_data = None
1805
1806         def _output_handler(self, fd, event):
1807                 files = self.input_files
1808                 for f in files.itervalues():
1809                         if fd == f.fileno():
1810                                 break
1811
1812                 buf = array.array('B')
1813                 try:
1814                         buf.fromfile(f, self._bufsize)
1815                 except EOFError:
1816                         pass
1817
1818                 if buf:
1819                         self._read_data.append(buf.tostring())
1820                 else:
1821                         self._unregister()
1822                         self.wait()
1823
1824                 return self._registered
1825
1826         def _unregister(self):
1827                 """
1828                 Unregister from the scheduler and close open files.
1829                 """
1830
1831                 self._registered = False
1832
1833                 if self._reg_ids is not None:
1834                         for reg_id in self._reg_ids:
1835                                 self.scheduler.unregister(reg_id)
1836                         self._reg_ids = None
1837
1838                 if self.input_files is not None:
1839                         for f in self.input_files.itervalues():
1840                                 f.close()
1841                         self.input_files = None
1842
1843 class CompositeTask(AsynchronousTask):
1844
1845         __slots__ = ("scheduler",) + ("_current_task",)
1846
1847         def isAlive(self):
1848                 return self._current_task is not None
1849
1850         def cancel(self):
1851                 self.cancelled = True
1852                 if self._current_task is not None:
1853                         self._current_task.cancel()
1854
1855         def _poll(self):
1856                 """
1857                 This does a loop calling self._current_task.poll()
1858                 repeatedly as long as the value of self._current_task
1859                 keeps changing. It calls poll() a maximum of one time
1860                 for a given self._current_task instance. This is useful
1861                 since calling poll() on a task can trigger advance to
1862                 the next task could eventually lead to the returncode
1863                 being set in cases when polling only a single task would
1864                 not have the same effect.
1865                 """
1866
1867                 prev = None
1868                 while True:
1869                         task = self._current_task
1870                         if task is None or task is prev:
1871                                 # don't poll the same task more than once
1872                                 break
1873                         task.poll()
1874                         prev = task
1875
1876                 return self.returncode
1877
1878         def _wait(self):
1879
1880                 prev = None
1881                 while True:
1882                         task = self._current_task
1883                         if task is None:
1884                                 # don't wait for the same task more than once
1885                                 break
1886                         if task is prev:
1887                                 # Before the task.wait() method returned, an exit
1888                                 # listener should have set self._current_task to either
1889                                 # a different task or None. Something is wrong.
1890                                 raise AssertionError("self._current_task has not " + \
1891                                         "changed since calling wait", self, task)
1892                         task.wait()
1893                         prev = task
1894
1895                 return self.returncode
1896
1897         def _assert_current(self, task):
1898                 """
1899                 Raises an AssertionError if the given task is not the
1900                 same one as self._current_task. This can be useful
1901                 for detecting bugs.
1902                 """
1903                 if task is not self._current_task:
1904                         raise AssertionError("Unrecognized task: %s" % (task,))
1905
1906         def _default_exit(self, task):
1907                 """
1908                 Calls _assert_current() on the given task and then sets the
1909                 composite returncode attribute if task.returncode != os.EX_OK.
1910                 If the task failed then self._current_task will be set to None.
1911                 Subclasses can use this as a generic task exit callback.
1912
1913                 @rtype: int
1914                 @returns: The task.returncode attribute.
1915                 """
1916                 self._assert_current(task)
1917                 if task.returncode != os.EX_OK:
1918                         self.returncode = task.returncode
1919                         self._current_task = None
1920                 return task.returncode
1921
1922         def _final_exit(self, task):
1923                 """
1924                 Assumes that task is the final task of this composite task.
1925                 Calls _default_exit() and sets self.returncode to the task's
1926                 returncode and sets self._current_task to None.
1927                 """
1928                 self._default_exit(task)
1929                 self._current_task = None
1930                 self.returncode = task.returncode
1931                 return self.returncode
1932
1933         def _default_final_exit(self, task):
1934                 """
1935                 This calls _final_exit() and then wait().
1936
1937                 Subclasses can use this as a generic final task exit callback.
1938
1939                 """
1940                 self._final_exit(task)
1941                 return self.wait()
1942
1943         def _start_task(self, task, exit_handler):
1944                 """
1945                 Register exit handler for the given task, set it
1946                 as self._current_task, and call task.start().
1947
1948                 Subclasses can use this as a generic way to start
1949                 a task.
1950
1951                 """
1952                 task.addExitListener(exit_handler)
1953                 self._current_task = task
1954                 task.start()
1955
1956 class TaskSequence(CompositeTask):
1957         """
1958         A collection of tasks that executes sequentially. Each task
1959         must have a addExitListener() method that can be used as
1960         a means to trigger movement from one task to the next.
1961         """
1962
1963         __slots__ = ("_task_queue",)
1964
1965         def __init__(self, **kwargs):
1966                 AsynchronousTask.__init__(self, **kwargs)
1967                 self._task_queue = deque()
1968
1969         def add(self, task):
1970                 self._task_queue.append(task)
1971
1972         def _start(self):
1973                 self._start_next_task()
1974
1975         def cancel(self):
1976                 self._task_queue.clear()
1977                 CompositeTask.cancel(self)
1978
1979         def _start_next_task(self):
1980                 self._start_task(self._task_queue.popleft(),
1981                         self._task_exit_handler)
1982
1983         def _task_exit_handler(self, task):
1984                 if self._default_exit(task) != os.EX_OK:
1985                         self.wait()
1986                 elif self._task_queue:
1987                         self._start_next_task()
1988                 else:
1989                         self._final_exit(task)
1990                         self.wait()
1991
1992 class SubProcess(AsynchronousTask):
1993
1994         __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1995
1996         # A file descriptor is required for the scheduler to monitor changes from
1997         # inside a poll() loop. When logging is not enabled, create a pipe just to
1998         # serve this purpose alone.
1999         _dummy_pipe_fd = 9
2000
2001         def _poll(self):
2002                 if self.returncode is not None:
2003                         return self.returncode
2004                 if self.pid is None:
2005                         return self.returncode
2006                 if self._registered:
2007                         return self.returncode
2008
2009                 try:
2010                         retval = os.waitpid(self.pid, os.WNOHANG)
2011                 except OSError, e:
2012                         if e.errno != errno.ECHILD:
2013                                 raise
2014                         del e
2015                         retval = (self.pid, 1)
2016
2017                 if retval == (0, 0):
2018                         return None
2019                 self._set_returncode(retval)
2020                 return self.returncode
2021
2022         def cancel(self):
2023                 if self.isAlive():
2024                         try:
2025                                 os.kill(self.pid, signal.SIGTERM)
2026                         except OSError, e:
2027                                 if e.errno != errno.ESRCH:
2028                                         raise
2029                                 del e
2030
2031                 self.cancelled = True
2032                 if self.pid is not None:
2033                         self.wait()
2034                 return self.returncode
2035
2036         def isAlive(self):
2037                 return self.pid is not None and \
2038                         self.returncode is None
2039
2040         def _wait(self):
2041
2042                 if self.returncode is not None:
2043                         return self.returncode
2044
2045                 if self._registered:
2046                         self.scheduler.schedule(self._reg_id)
2047                         self._unregister()
2048                         if self.returncode is not None:
2049                                 return self.returncode
2050
2051                 try:
2052                         wait_retval = os.waitpid(self.pid, 0)
2053                 except OSError, e:
2054                         if e.errno != errno.ECHILD:
2055                                 raise
2056                         del e
2057                         self._set_returncode((self.pid, 1))
2058                 else:
2059                         self._set_returncode(wait_retval)
2060
2061                 return self.returncode
2062
2063         def _unregister(self):
2064                 """
2065                 Unregister from the scheduler and close open files.
2066                 """
2067
2068                 self._registered = False
2069
2070                 if self._reg_id is not None:
2071                         self.scheduler.unregister(self._reg_id)
2072                         self._reg_id = None
2073
2074                 if self._files is not None:
2075                         for f in self._files.itervalues():
2076                                 f.close()
2077                         self._files = None
2078
2079         def _set_returncode(self, wait_retval):
2080
2081                 retval = wait_retval[1]
2082
2083                 if retval != os.EX_OK:
2084                         if retval & 0xff:
2085                                 retval = (retval & 0xff) << 8
2086                         else:
2087                                 retval = retval >> 8
2088
2089                 self.returncode = retval
2090
2091 class SpawnProcess(SubProcess):
2092
2093         """
2094         Constructor keyword args are passed into portage.process.spawn().
2095         The required "args" keyword argument will be passed as the first
2096         spawn() argument.
2097         """
2098
2099         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2100                 "uid", "gid", "groups", "umask", "logfile",
2101                 "path_lookup", "pre_exec")
2102
2103         __slots__ = ("args",) + \
2104                 _spawn_kwarg_names
2105
2106         _file_names = ("log", "process", "stdout")
2107         _files_dict = slot_dict_class(_file_names, prefix="")
2108         _bufsize = 4096
2109
2110         def _start(self):
2111
2112                 if self.cancelled:
2113                         return
2114
2115                 if self.fd_pipes is None:
2116                         self.fd_pipes = {}
2117                 fd_pipes = self.fd_pipes
2118                 fd_pipes.setdefault(0, sys.stdin.fileno())
2119                 fd_pipes.setdefault(1, sys.stdout.fileno())
2120                 fd_pipes.setdefault(2, sys.stderr.fileno())
2121
2122                 # flush any pending output
2123                 for fd in fd_pipes.itervalues():
2124                         if fd == sys.stdout.fileno():
2125                                 sys.stdout.flush()
2126                         if fd == sys.stderr.fileno():
2127                                 sys.stderr.flush()
2128
2129                 logfile = self.logfile
2130                 self._files = self._files_dict()
2131                 files = self._files
2132
2133                 master_fd, slave_fd = self._pipe(fd_pipes)
2134                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2135                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2136
2137                 null_input = None
2138                 fd_pipes_orig = fd_pipes.copy()
2139                 if self.background:
2140                         # TODO: Use job control functions like tcsetpgrp() to control
2141                         # access to stdin. Until then, use /dev/null so that any
2142                         # attempts to read from stdin will immediately return EOF
2143                         # instead of blocking indefinitely.
2144                         null_input = open('/dev/null', 'rb')
2145                         fd_pipes[0] = null_input.fileno()
2146                 else:
2147                         fd_pipes[0] = fd_pipes_orig[0]
2148
2149                 files.process = os.fdopen(master_fd, 'r')
2150                 if logfile is not None:
2151
2152                         fd_pipes[1] = slave_fd
2153                         fd_pipes[2] = slave_fd
2154
2155                         files.log = open(logfile, "a")
2156                         portage.util.apply_secpass_permissions(logfile,
2157                                 uid=portage.portage_uid, gid=portage.portage_gid,
2158                                 mode=0660)
2159
2160                         if not self.background:
2161                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2162
2163                         output_handler = self._output_handler
2164
2165                 else:
2166
2167                         # Create a dummy pipe so the scheduler can monitor
2168                         # the process from inside a poll() loop.
2169                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2170                         if self.background:
2171                                 fd_pipes[1] = slave_fd
2172                                 fd_pipes[2] = slave_fd
2173                         output_handler = self._dummy_handler
2174
2175                 kwargs = {}
2176                 for k in self._spawn_kwarg_names:
2177                         v = getattr(self, k)
2178                         if v is not None:
2179                                 kwargs[k] = v
2180
2181                 kwargs["fd_pipes"] = fd_pipes
2182                 kwargs["returnpid"] = True
2183                 kwargs.pop("logfile", None)
2184
2185                 retval = self._spawn(self.args, **kwargs)
2186
2187                 os.close(slave_fd)
2188                 if null_input is not None:
2189                         null_input.close()
2190
2191                 if isinstance(retval, int):
2192                         # spawn failed
2193                         for f in files.values():
2194                                 f.close()
2195                         self.returncode = retval
2196                         self.wait()
2197                         return
2198
2199                 self.pid = retval[0]
2200                 portage.process.spawned_pids.remove(self.pid)
2201
2202                 self._reg_id = self.scheduler.register(files.process.fileno(),
2203                         PollConstants.POLLIN, output_handler)
2204                 self._registered = True
2205
2206         def _pipe(self, fd_pipes):
2207                 """
2208                 @type fd_pipes: dict
2209                 @param fd_pipes: pipes from which to copy terminal size if desired.
2210                 """
2211                 return os.pipe()
2212
2213         def _spawn(self, args, **kwargs):
2214                 return portage.process.spawn(args, **kwargs)
2215
2216         def _output_handler(self, fd, event):
2217                 files = self._files
2218                 buf = array.array('B')
2219                 try:
2220                         buf.fromfile(files.process, self._bufsize)
2221                 except EOFError:
2222                         pass
2223                 if buf:
2224                         if not self.background:
2225                                 buf.tofile(files.stdout)
2226                                 files.stdout.flush()
2227                         buf.tofile(files.log)
2228                         files.log.flush()
2229                 else:
2230                         self._unregister()
2231                         self.wait()
2232                 return self._registered
2233
2234         def _dummy_handler(self, fd, event):
2235                 """
2236                 This method is mainly interested in detecting EOF, since
2237                 the only purpose of the pipe is to allow the scheduler to
2238                 monitor the process from inside a poll() loop.
2239                 """
2240                 files = self._files
2241                 buf = array.array('B')
2242                 try:
2243                         buf.fromfile(files.process, self._bufsize)
2244                 except EOFError:
2245                         pass
2246                 if buf:
2247                         pass
2248                 else:
2249                         self._unregister()
2250                         self.wait()
2251                 return self._registered
2252
2253 class MiscFunctionsProcess(SpawnProcess):
2254         """
2255         Spawns misc-functions.sh with an existing ebuild environment.
2256         """
2257
2258         __slots__ = ("commands", "phase", "pkg", "settings")
2259
2260         def _start(self):
2261                 settings = self.settings
2262                 settings.pop("EBUILD_PHASE", None)
2263                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2264                 misc_sh_binary = os.path.join(portage_bin_path,
2265                         os.path.basename(portage.const.MISC_SH_BINARY))
2266
2267                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2268                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2269
2270                 portage._doebuild_exit_status_unlink(
2271                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2272
2273                 SpawnProcess._start(self)
2274
2275         def _spawn(self, args, **kwargs):
2276                 settings = self.settings
2277                 debug = settings.get("PORTAGE_DEBUG") == "1"
2278                 return portage.spawn(" ".join(args), settings,
2279                         debug=debug, **kwargs)
2280
2281         def _set_returncode(self, wait_retval):
2282                 SpawnProcess._set_returncode(self, wait_retval)
2283                 self.returncode = portage._doebuild_exit_status_check_and_log(
2284                         self.settings, self.phase, self.returncode)
2285
2286 class EbuildFetcher(SpawnProcess):
2287
2288         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2289                 ("_build_dir",)
2290
2291         def _start(self):
2292
2293                 root_config = self.pkg.root_config
2294                 portdb = root_config.trees["porttree"].dbapi
2295                 ebuild_path = portdb.findname(self.pkg.cpv)
2296                 settings = self.config_pool.allocate()
2297                 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2298                 self._build_dir.lock()
2299                 self._build_dir.clean()
2300                 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2301                 if self.logfile is None:
2302                         self.logfile = settings.get("PORTAGE_LOG_FILE")
2303
2304                 phase = "fetch"
2305                 if self.fetchall:
2306                         phase = "fetchall"
2307
2308                 # If any incremental variables have been overridden
2309                 # via the environment, those values need to be passed
2310                 # along here so that they are correctly considered by
2311                 # the config instance in the subproccess.
2312                 fetch_env = os.environ.copy()
2313
2314                 fetch_env["PORTAGE_NICENESS"] = "0"
2315                 if self.prefetch:
2316                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2317
2318                 ebuild_binary = os.path.join(
2319                         settings["PORTAGE_BIN_PATH"], "ebuild")
2320
2321                 fetch_args = [ebuild_binary, ebuild_path, phase]
2322                 debug = settings.get("PORTAGE_DEBUG") == "1"
2323                 if debug:
2324                         fetch_args.append("--debug")
2325
2326                 self.args = fetch_args
2327                 self.env = fetch_env
2328                 SpawnProcess._start(self)
2329
2330         def _pipe(self, fd_pipes):
2331                 """When appropriate, use a pty so that fetcher progress bars,
2332                 like wget has, will work properly."""
2333                 if self.background or not sys.stdout.isatty():
2334                         # When the output only goes to a log file,
2335                         # there's no point in creating a pty.
2336                         return os.pipe()
2337                 stdout_pipe = fd_pipes.get(1)
2338                 got_pty, master_fd, slave_fd = \
2339                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2340                 return (master_fd, slave_fd)
2341
2342         def _set_returncode(self, wait_retval):
2343                 SpawnProcess._set_returncode(self, wait_retval)
2344                 # Collect elog messages that might have been
2345                 # created by the pkg_nofetch phase.
2346                 if self._build_dir is not None:
2347                         # Skip elog messages for prefetch, in order to avoid duplicates.
2348                         if not self.prefetch and self.returncode != os.EX_OK:
2349                                 elog_out = None
2350                                 if self.logfile is not None:
2351                                         if self.background:
2352                                                 elog_out = open(self.logfile, 'a')
2353                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2354                                 if self.logfile is not None:
2355                                         msg += ", Log file:"
2356                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2357                                 if self.logfile is not None:
2358                                         eerror(" '%s'" % (self.logfile,),
2359                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2360                                 if elog_out is not None:
2361                                         elog_out.close()
2362                         if not self.prefetch:
2363                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2364                         features = self._build_dir.settings.features
2365                         if self.returncode == os.EX_OK:
2366                                 self._build_dir.clean()
2367                         self._build_dir.unlock()
2368                         self.config_pool.deallocate(self._build_dir.settings)
2369                         self._build_dir = None
2370
2371 class EbuildBuildDir(SlotObject):
2372
2373         __slots__ = ("dir_path", "pkg", "settings",
2374                 "locked", "_catdir", "_lock_obj")
2375
2376         def __init__(self, **kwargs):
2377                 SlotObject.__init__(self, **kwargs)
2378                 self.locked = False
2379
2380         def lock(self):
2381                 """
2382                 This raises an AlreadyLocked exception if lock() is called
2383                 while a lock is already held. In order to avoid this, call
2384                 unlock() or check whether the "locked" attribute is True
2385                 or False before calling lock().
2386                 """
2387                 if self._lock_obj is not None:
2388                         raise self.AlreadyLocked((self._lock_obj,))
2389
2390                 dir_path = self.dir_path
2391                 if dir_path is None:
2392                         root_config = self.pkg.root_config
2393                         portdb = root_config.trees["porttree"].dbapi
2394                         ebuild_path = portdb.findname(self.pkg.cpv)
2395                         settings = self.settings
2396                         settings.setcpv(self.pkg)
2397                         debug = settings.get("PORTAGE_DEBUG") == "1"
2398                         use_cache = 1 # always true
2399                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2400                                 self.settings, debug, use_cache, portdb)
2401                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2402
2403                 catdir = os.path.dirname(dir_path)
2404                 self._catdir = catdir
2405
2406                 portage.util.ensure_dirs(os.path.dirname(catdir),
2407                         gid=portage.portage_gid,
2408                         mode=070, mask=0)
2409                 catdir_lock = None
2410                 try:
2411                         catdir_lock = portage.locks.lockdir(catdir)
2412                         portage.util.ensure_dirs(catdir,
2413                                 gid=portage.portage_gid,
2414                                 mode=070, mask=0)
2415                         self._lock_obj = portage.locks.lockdir(dir_path)
2416                 finally:
2417                         self.locked = self._lock_obj is not None
2418                         if catdir_lock is not None:
2419                                 portage.locks.unlockdir(catdir_lock)
2420
2421         def clean(self):
2422                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2423                 by keepwork or keeptemp in FEATURES."""
2424                 settings = self.settings
2425                 features = settings.features
2426                 if not ("keepwork" in features or "keeptemp" in features):
2427                         try:
2428                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2429                         except EnvironmentError, e:
2430                                 if e.errno != errno.ENOENT:
2431                                         raise
2432                                 del e
2433
2434         def unlock(self):
2435                 if self._lock_obj is None:
2436                         return
2437
2438                 portage.locks.unlockdir(self._lock_obj)
2439                 self._lock_obj = None
2440                 self.locked = False
2441
2442                 catdir = self._catdir
2443                 catdir_lock = None
2444                 try:
2445                         catdir_lock = portage.locks.lockdir(catdir)
2446                 finally:
2447                         if catdir_lock:
2448                                 try:
2449                                         os.rmdir(catdir)
2450                                 except OSError, e:
2451                                         if e.errno not in (errno.ENOENT,
2452                                                 errno.ENOTEMPTY, errno.EEXIST):
2453                                                 raise
2454                                         del e
2455                                 portage.locks.unlockdir(catdir_lock)
2456
2457         class AlreadyLocked(portage.exception.PortageException):
2458                 pass
2459
2460 class EbuildBuild(CompositeTask):
2461
2462         __slots__ = ("args_set", "config_pool", "find_blockers",
2463                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2464                 "prefetcher", "settings", "world_atom") + \
2465                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2466
2467         def _start(self):
2468
2469                 logger = self.logger
2470                 opts = self.opts
2471                 pkg = self.pkg
2472                 settings = self.settings
2473                 world_atom = self.world_atom
2474                 root_config = pkg.root_config
2475                 tree = "porttree"
2476                 self._tree = tree
2477                 portdb = root_config.trees[tree].dbapi
2478                 settings["EMERGE_FROM"] = pkg.type_name
2479                 settings.backup_changes("EMERGE_FROM")
2480                 settings.reset()
2481                 ebuild_path = portdb.findname(self.pkg.cpv)
2482                 self._ebuild_path = ebuild_path
2483
2484                 prefetcher = self.prefetcher
2485                 if prefetcher is None:
2486                         pass
2487                 elif not prefetcher.isAlive():
2488                         prefetcher.cancel()
2489                 elif prefetcher.poll() is None:
2490
2491                         waiting_msg = "Fetching files " + \
2492                                 "in the background. " + \
2493                                 "To view fetch progress, run `tail -f " + \
2494                                 "/var/log/emerge-fetch.log` in another " + \
2495                                 "terminal."
2496                         msg_prefix = colorize("GOOD", " * ")
2497                         from textwrap import wrap
2498                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2499                                 for line in wrap(waiting_msg, 65))
2500                         if not self.background:
2501                                 writemsg(waiting_msg, noiselevel=-1)
2502
2503                         self._current_task = prefetcher
2504                         prefetcher.addExitListener(self._prefetch_exit)
2505                         return
2506
2507                 self._prefetch_exit(prefetcher)
2508
2509         def _prefetch_exit(self, prefetcher):
2510
2511                 opts = self.opts
2512                 pkg = self.pkg
2513                 settings = self.settings
2514
2515                 if opts.fetchonly:
2516                                 fetcher = EbuildFetchonly(
2517                                         fetch_all=opts.fetch_all_uri,
2518                                         pkg=pkg, pretend=opts.pretend,
2519                                         settings=settings)
2520                                 retval = fetcher.execute()
2521                                 self.returncode = retval
2522                                 self.wait()
2523                                 return
2524
2525                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2526                         fetchall=opts.fetch_all_uri,
2527                         fetchonly=opts.fetchonly,
2528                         background=self.background,
2529                         pkg=pkg, scheduler=self.scheduler)
2530
2531                 self._start_task(fetcher, self._fetch_exit)
2532
2533         def _fetch_exit(self, fetcher):
2534                 opts = self.opts
2535                 pkg = self.pkg
2536
2537                 fetch_failed = False
2538                 if opts.fetchonly:
2539                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2540                 else:
2541                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2542
2543                 if fetch_failed and fetcher.logfile is not None and \
2544                         os.path.exists(fetcher.logfile):
2545                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2546
2547                 if not fetch_failed and fetcher.logfile is not None:
2548                         # Fetch was successful, so remove the fetch log.
2549                         try:
2550                                 os.unlink(fetcher.logfile)
2551                         except OSError:
2552                                 pass
2553
2554                 if fetch_failed or opts.fetchonly:
2555                         self.wait()
2556                         return
2557
2558                 logger = self.logger
2559                 opts = self.opts
2560                 pkg_count = self.pkg_count
2561                 scheduler = self.scheduler
2562                 settings = self.settings
2563                 features = settings.features
2564                 ebuild_path = self._ebuild_path
2565                 system_set = pkg.root_config.sets["system"]
2566
2567                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2568                 self._build_dir.lock()
2569
2570                 # Cleaning is triggered before the setup
2571                 # phase, in portage.doebuild().
2572                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2573                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2574                 short_msg = "emerge: (%s of %s) %s Clean" % \
2575                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2576                 logger.log(msg, short_msg=short_msg)
2577
2578                 #buildsyspkg: Check if we need to _force_ binary package creation
2579                 self._issyspkg = "buildsyspkg" in features and \
2580                                 system_set.findAtomForPackage(pkg) and \
2581                                 not opts.buildpkg
2582
2583                 if opts.buildpkg or self._issyspkg:
2584
2585                         self._buildpkg = True
2586
2587                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2588                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2589                         short_msg = "emerge: (%s of %s) %s Compile" % \
2590                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2591                         logger.log(msg, short_msg=short_msg)
2592
2593                 else:
2594                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2595                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2596                         short_msg = "emerge: (%s of %s) %s Compile" % \
2597                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2598                         logger.log(msg, short_msg=short_msg)
2599
2600                 build = EbuildExecuter(background=self.background, pkg=pkg,
2601                         scheduler=scheduler, settings=settings)
2602                 self._start_task(build, self._build_exit)
2603
2604         def _unlock_builddir(self):
2605                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2606                 self._build_dir.unlock()
2607
2608         def _build_exit(self, build):
2609                 if self._default_exit(build) != os.EX_OK:
2610                         self._unlock_builddir()
2611                         self.wait()
2612                         return
2613
2614                 opts = self.opts
2615                 buildpkg = self._buildpkg
2616
2617                 if not buildpkg:
2618                         self._final_exit(build)
2619                         self.wait()
2620                         return
2621
2622                 if self._issyspkg:
2623                         msg = ">>> This is a system package, " + \
2624                                 "let's pack a rescue tarball.\n"
2625
2626                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2627                         if log_path is not None:
2628                                 log_file = open(log_path, 'a')
2629                                 try:
2630                                         log_file.write(msg)
2631                                 finally:
2632                                         log_file.close()
2633
2634                         if not self.background:
2635                                 portage.writemsg_stdout(msg, noiselevel=-1)
2636
2637                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2638                         scheduler=self.scheduler, settings=self.settings)
2639
2640                 self._start_task(packager, self._buildpkg_exit)
2641
2642         def _buildpkg_exit(self, packager):
2643                 """
2644                 Released build dir lock when there is a failure or
2645                 when in buildpkgonly mode. Otherwise, the lock will
2646                 be released when merge() is called.
2647                 """
2648
2649                 if self._default_exit(packager) == os.EX_OK and \
2650                         self.opts.buildpkgonly:
2651                         # Need to call "clean" phase for buildpkgonly mode
2652                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2653                         phase = "clean"
2654                         clean_phase = EbuildPhase(background=self.background,
2655                                 pkg=self.pkg, phase=phase,
2656                                 scheduler=self.scheduler, settings=self.settings,
2657                                 tree=self._tree)
2658                         self._start_task(clean_phase, self._clean_exit)
2659                         return
2660
2661                 if self._final_exit(packager) != os.EX_OK or \
2662                         self.opts.buildpkgonly:
2663                         self._unlock_builddir()
2664                 self.wait()
2665
2666         def _clean_exit(self, clean_phase):
2667                 if self._final_exit(clean_phase) != os.EX_OK or \
2668                         self.opts.buildpkgonly:
2669                         self._unlock_builddir()
2670                 self.wait()
2671
2672         def install(self):
2673                 """
2674                 Install the package and then clean up and release locks.
2675                 Only call this after the build has completed successfully
2676                 and neither fetchonly nor buildpkgonly mode are enabled.
2677                 """
2678
2679                 find_blockers = self.find_blockers
2680                 ldpath_mtimes = self.ldpath_mtimes
2681                 logger = self.logger
2682                 pkg = self.pkg
2683                 pkg_count = self.pkg_count
2684                 settings = self.settings
2685                 world_atom = self.world_atom
2686                 ebuild_path = self._ebuild_path
2687                 tree = self._tree
2688
2689                 merge = EbuildMerge(find_blockers=self.find_blockers,
2690                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2691                         pkg_count=pkg_count, pkg_path=ebuild_path,
2692                         scheduler=self.scheduler,
2693                         settings=settings, tree=tree, world_atom=world_atom)
2694
2695                 msg = " === (%s of %s) Merging (%s::%s)" % \
2696                         (pkg_count.curval, pkg_count.maxval,
2697                         pkg.cpv, ebuild_path)
2698                 short_msg = "emerge: (%s of %s) %s Merge" % \
2699                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2700                 logger.log(msg, short_msg=short_msg)
2701
2702                 try:
2703                         rval = merge.execute()
2704                 finally:
2705                         self._unlock_builddir()
2706
2707                 return rval
2708
2709 class EbuildExecuter(CompositeTask):
2710
2711         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2712
2713         _phases = ("prepare", "configure", "compile", "test", "install")
2714
2715         _live_eclasses = frozenset([
2716                 "bzr",
2717                 "cvs",
2718                 "darcs",
2719                 "git",
2720                 "mercurial",
2721                 "subversion"
2722         ])
2723
2724         def _start(self):
2725                 self._tree = "porttree"
2726                 pkg = self.pkg
2727                 phase = "clean"
2728                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2729                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2730                 self._start_task(clean_phase, self._clean_phase_exit)
2731
2732         def _clean_phase_exit(self, clean_phase):
2733
2734                 if self._default_exit(clean_phase) != os.EX_OK:
2735                         self.wait()
2736                         return
2737
2738                 pkg = self.pkg
2739                 scheduler = self.scheduler
2740                 settings = self.settings
2741                 cleanup = 1
2742
2743                 # This initializes PORTAGE_LOG_FILE.
2744                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2745
2746                 setup_phase = EbuildPhase(background=self.background,
2747                         pkg=pkg, phase="setup", scheduler=scheduler,
2748                         settings=settings, tree=self._tree)
2749
2750                 setup_phase.addExitListener(self._setup_exit)
2751                 self._current_task = setup_phase
2752                 self.scheduler.scheduleSetup(setup_phase)
2753
2754         def _setup_exit(self, setup_phase):
2755
2756                 if self._default_exit(setup_phase) != os.EX_OK:
2757                         self.wait()
2758                         return
2759
2760                 unpack_phase = EbuildPhase(background=self.background,
2761                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2762                         settings=self.settings, tree=self._tree)
2763
2764                 if self._live_eclasses.intersection(self.pkg.inherited):
2765                         # Serialize $DISTDIR access for live ebuilds since
2766                         # otherwise they can interfere with eachother.
2767
2768                         unpack_phase.addExitListener(self._unpack_exit)
2769                         self._current_task = unpack_phase
2770                         self.scheduler.scheduleUnpack(unpack_phase)
2771
2772                 else:
2773                         self._start_task(unpack_phase, self._unpack_exit)
2774
2775         def _unpack_exit(self, unpack_phase):
2776
2777                 if self._default_exit(unpack_phase) != os.EX_OK:
2778                         self.wait()
2779                         return
2780
2781                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2782
2783                 pkg = self.pkg
2784                 phases = self._phases
2785                 eapi = pkg.metadata["EAPI"]
2786                 if eapi in ("0", "1", "2_pre1"):
2787                         # skip src_prepare and src_configure
2788                         phases = phases[2:]
2789                 elif eapi in ("2_pre2",):
2790                         # skip src_prepare
2791                         phases = phases[1:]
2792
2793                 for phase in phases:
2794                         ebuild_phases.add(EbuildPhase(background=self.background,
2795                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2796                                 settings=self.settings, tree=self._tree))
2797
2798                 self._start_task(ebuild_phases, self._default_final_exit)
2799
2800 class EbuildMetadataPhase(SubProcess):
2801
2802         """
2803         Asynchronous interface for the ebuild "depend" phase which is
2804         used to extract metadata from the ebuild.
2805         """
2806
2807         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2808                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2809                 ("_raw_metadata",)
2810
2811         _file_names = ("ebuild",)
2812         _files_dict = slot_dict_class(_file_names, prefix="")
2813         _bufsize = SpawnProcess._bufsize
2814         _metadata_fd = 9
2815
2816         def _start(self):
2817                 settings = self.settings
2818                 settings.reset()
2819                 ebuild_path = self.ebuild_path
2820                 debug = settings.get("PORTAGE_DEBUG") == "1"
2821                 master_fd = None
2822                 slave_fd = None
2823                 fd_pipes = None
2824                 if self.fd_pipes is not None:
2825                         fd_pipes = self.fd_pipes.copy()
2826                 else:
2827                         fd_pipes = {}
2828
2829                 fd_pipes.setdefault(0, sys.stdin.fileno())
2830                 fd_pipes.setdefault(1, sys.stdout.fileno())
2831                 fd_pipes.setdefault(2, sys.stderr.fileno())
2832
2833                 # flush any pending output
2834                 for fd in fd_pipes.itervalues():
2835                         if fd == sys.stdout.fileno():
2836                                 sys.stdout.flush()
2837                         if fd == sys.stderr.fileno():
2838                                 sys.stderr.flush()
2839
2840                 fd_pipes_orig = fd_pipes.copy()
2841                 self._files = self._files_dict()
2842                 files = self._files
2843
2844                 master_fd, slave_fd = os.pipe()
2845                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2846                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2847
2848                 fd_pipes[self._metadata_fd] = slave_fd
2849
2850                 retval = portage.doebuild(ebuild_path, "depend",
2851                         settings["ROOT"], settings, debug,
2852                         mydbapi=self.portdb, tree="porttree",
2853                         fd_pipes=fd_pipes, returnpid=True)
2854
2855                 os.close(slave_fd)
2856
2857                 if isinstance(retval, int):
2858                         # doebuild failed before spawning
2859                         os.close(master_fd)
2860                         self.returncode = retval
2861                         self.wait()
2862                         return
2863
2864                 self.pid = retval[0]
2865                 portage.process.spawned_pids.remove(self.pid)
2866
2867                 self._raw_metadata = []
2868                 files.ebuild = os.fdopen(master_fd, 'r')
2869                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2870                         PollConstants.POLLIN, self._output_handler)
2871                 self._registered = True
2872
2873         def _output_handler(self, fd, event):
2874                 files = self._files
2875                 self._raw_metadata.append(files.ebuild.read())
2876                 if not self._raw_metadata[-1]:
2877                         self._unregister()
2878                         self.wait()
2879
2880                         if self.returncode == os.EX_OK:
2881                                 metadata = izip(portage.auxdbkeys,
2882                                         "".join(self._raw_metadata).splitlines())
2883                                 self.metadata_callback(self.cpv, self.ebuild_path,
2884                                         self.repo_path, metadata, self.ebuild_mtime)
2885
2886                 return self._registered
2887
2888 class EbuildProcess(SpawnProcess):
2889
2890         __slots__ = ("phase", "pkg", "settings", "tree")
2891
2892         def _start(self):
2893                 # Don't open the log file during the clean phase since the
2894                 # open file can result in an nfs lock on $T/build.log which
2895                 # prevents the clean phase from removing $T.
2896                 if self.phase not in ("clean", "cleanrm"):
2897                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2898                 SpawnProcess._start(self)
2899
2900         def _pipe(self, fd_pipes):
2901                 stdout_pipe = fd_pipes.get(1)
2902                 got_pty, master_fd, slave_fd = \
2903                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2904                 return (master_fd, slave_fd)
2905
2906         def _spawn(self, args, **kwargs):
2907
2908                 root_config = self.pkg.root_config
2909                 tree = self.tree
2910                 mydbapi = root_config.trees[tree].dbapi
2911                 settings = self.settings
2912                 ebuild_path = settings["EBUILD"]
2913                 debug = settings.get("PORTAGE_DEBUG") == "1"
2914
2915                 rval = portage.doebuild(ebuild_path, self.phase,
2916                         root_config.root, settings, debug,
2917                         mydbapi=mydbapi, tree=tree, **kwargs)
2918
2919                 return rval
2920
2921         def _set_returncode(self, wait_retval):
2922                 SpawnProcess._set_returncode(self, wait_retval)
2923
2924                 if self.phase not in ("clean", "cleanrm"):
2925                         self.returncode = portage._doebuild_exit_status_check_and_log(
2926                                 self.settings, self.phase, self.returncode)
2927
2928                 portage._post_phase_userpriv_perms(self.settings)
2929
2930 class EbuildPhase(CompositeTask):
2931
2932         __slots__ = ("background", "pkg", "phase",
2933                 "scheduler", "settings", "tree")
2934
2935         _post_phase_cmds = portage._post_phase_cmds
2936
2937         def _start(self):
2938
2939                 ebuild_process = EbuildProcess(background=self.background,
2940                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2941                         settings=self.settings, tree=self.tree)
2942
2943                 self._start_task(ebuild_process, self._ebuild_exit)
2944
2945         def _ebuild_exit(self, ebuild_process):
2946
2947                 if self.phase == "install":
2948                         out = None
2949                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2950                         log_file = None
2951                         if self.background and log_path is not None:
2952                                 log_file = open(log_path, 'a')
2953                                 out = log_file
2954                         try:
2955                                 portage._check_build_log(self.settings, out=out)
2956                         finally:
2957                                 if log_file is not None:
2958                                         log_file.close()
2959
2960                 if self._default_exit(ebuild_process) != os.EX_OK:
2961                         self.wait()
2962                         return
2963
2964                 settings = self.settings
2965
2966                 if self.phase == "install":
2967                         portage._post_src_install_uid_fix(settings)
2968
2969                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2970                 if post_phase_cmds is not None:
2971                         post_phase = MiscFunctionsProcess(background=self.background,
2972                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2973                                 scheduler=self.scheduler, settings=settings)
2974                         self._start_task(post_phase, self._post_phase_exit)
2975                         return
2976
2977                 self.returncode = ebuild_process.returncode
2978                 self._current_task = None
2979                 self.wait()
2980
2981         def _post_phase_exit(self, post_phase):
2982                 if self._final_exit(post_phase) != os.EX_OK:
2983                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
2984                                 noiselevel=-1)
2985                 self._current_task = None
2986                 self.wait()
2987                 return
2988
2989 class EbuildBinpkg(EbuildProcess):
2990         """
2991         This assumes that src_install() has successfully completed.
2992         """
2993         __slots__ = ("_binpkg_tmpfile",)
2994
2995         def _start(self):
2996                 self.phase = "package"
2997                 self.tree = "porttree"
2998                 pkg = self.pkg
2999                 root_config = pkg.root_config
3000                 portdb = root_config.trees["porttree"].dbapi
3001                 bintree = root_config.trees["bintree"]
3002                 ebuild_path = portdb.findname(self.pkg.cpv)
3003                 settings = self.settings
3004                 debug = settings.get("PORTAGE_DEBUG") == "1"
3005
3006                 bintree.prevent_collision(pkg.cpv)
3007                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3008                         pkg.cpv + ".tbz2." + str(os.getpid()))
3009                 self._binpkg_tmpfile = binpkg_tmpfile
3010                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3011                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3012
3013                 try:
3014                         EbuildProcess._start(self)
3015                 finally:
3016                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3017
3018         def _set_returncode(self, wait_retval):
3019                 EbuildProcess._set_returncode(self, wait_retval)
3020
3021                 pkg = self.pkg
3022                 bintree = pkg.root_config.trees["bintree"]
3023                 binpkg_tmpfile = self._binpkg_tmpfile
3024                 if self.returncode == os.EX_OK:
3025                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3026
3027 class EbuildMerge(SlotObject):
3028
3029         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3030                 "pkg", "pkg_count", "pkg_path", "pretend",
3031                 "scheduler", "settings", "tree", "world_atom")
3032
3033         def execute(self):
3034                 root_config = self.pkg.root_config
3035                 settings = self.settings
3036                 retval = portage.merge(settings["CATEGORY"],
3037                         settings["PF"], settings["D"],
3038                         os.path.join(settings["PORTAGE_BUILDDIR"],
3039                         "build-info"), root_config.root, settings,
3040                         myebuild=settings["EBUILD"],
3041                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3042                         vartree=root_config.trees["vartree"],
3043                         prev_mtimes=self.ldpath_mtimes,
3044                         scheduler=self.scheduler,
3045                         blockers=self.find_blockers)
3046
3047                 if retval == os.EX_OK:
3048                         self.world_atom(self.pkg)
3049                         self._log_success()
3050
3051                 return retval
3052
3053         def _log_success(self):
3054                 pkg = self.pkg
3055                 pkg_count = self.pkg_count
3056                 pkg_path = self.pkg_path
3057                 logger = self.logger
3058                 if "noclean" not in self.settings.features:
3059                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3060                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3061                         logger.log((" === (%s of %s) " + \
3062                                 "Post-Build Cleaning (%s::%s)") % \
3063                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3064                                 short_msg=short_msg)
3065                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3066                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3067
3068 class PackageUninstall(AsynchronousTask):
3069
3070         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3071
3072         def _start(self):
3073                 try:
3074                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3075                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3076                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3077                                 writemsg_level=self._writemsg_level)
3078                 except UninstallFailure, e:
3079                         self.returncode = e.status
3080                 else:
3081                         self.returncode = os.EX_OK
3082                 self.wait()
3083
3084         def _writemsg_level(self, msg, level=0, noiselevel=0):
3085
3086                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3087                 background = self.background
3088
3089                 if log_path is None:
3090                         if not (background and level < logging.WARNING):
3091                                 portage.util.writemsg_level(msg,
3092                                         level=level, noiselevel=noiselevel)
3093                 else:
3094                         if not background:
3095                                 portage.util.writemsg_level(msg,
3096                                         level=level, noiselevel=noiselevel)
3097
3098                         f = open(log_path, 'a')
3099                         try:
3100                                 f.write(msg)
3101                         finally:
3102                                 f.close()
3103
3104 class Binpkg(CompositeTask):
3105
3106         __slots__ = ("find_blockers",
3107                 "ldpath_mtimes", "logger", "opts",
3108                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3109                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3110                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3111
3112         def _writemsg_level(self, msg, level=0, noiselevel=0):
3113
3114                 if not self.background:
3115                         portage.util.writemsg_level(msg,
3116                                 level=level, noiselevel=noiselevel)
3117
3118                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3119                 if  log_path is not None:
3120                         f = open(log_path, 'a')
3121                         try:
3122                                 f.write(msg)
3123                         finally:
3124                                 f.close()
3125
3126         def _start(self):
3127
3128                 pkg = self.pkg
3129                 settings = self.settings
3130                 settings.setcpv(pkg)
3131                 self._tree = "bintree"
3132                 self._bintree = self.pkg.root_config.trees[self._tree]
3133                 self._verify = "strict" in self.settings.features and \
3134                         not self.opts.pretend
3135
3136                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3137                         "portage", pkg.category, pkg.pf)
3138                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3139                         pkg=pkg, settings=settings)
3140                 self._image_dir = os.path.join(dir_path, "image")
3141                 self._infloc = os.path.join(dir_path, "build-info")
3142                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3143
3144                 # The prefetcher has already completed or it
3145                 # could be running now. If it's running now,
3146                 # wait for it to complete since it holds
3147                 # a lock on the file being fetched. The
3148                 # portage.locks functions are only designed
3149                 # to work between separate processes. Since
3150                 # the lock is held by the current process,
3151                 # use the scheduler and fetcher methods to
3152                 # synchronize with the fetcher.
3153                 prefetcher = self.prefetcher
3154                 if prefetcher is None:
3155                         pass
3156                 elif not prefetcher.isAlive():
3157                         prefetcher.cancel()
3158                 elif prefetcher.poll() is None:
3159
3160                         waiting_msg = ("Fetching '%s' " + \
3161                                 "in the background. " + \
3162                                 "To view fetch progress, run `tail -f " + \
3163                                 "/var/log/emerge-fetch.log` in another " + \
3164                                 "terminal.") % prefetcher.pkg_path
3165                         msg_prefix = colorize("GOOD", " * ")
3166                         from textwrap import wrap
3167                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3168                                 for line in wrap(waiting_msg, 65))
3169                         if not self.background:
3170                                 writemsg(waiting_msg, noiselevel=-1)
3171
3172                         self._current_task = prefetcher
3173                         prefetcher.addExitListener(self._prefetch_exit)
3174                         return
3175
3176                 self._prefetch_exit(prefetcher)
3177
3178         def _prefetch_exit(self, prefetcher):
3179
3180                 pkg = self.pkg
3181                 pkg_count = self.pkg_count
3182                 fetcher = BinpkgFetcher(background=self.background,
3183                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3184                         scheduler=self.scheduler)
3185                 pkg_path = fetcher.pkg_path
3186                 self._pkg_path = pkg_path
3187
3188                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3189
3190                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3191                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3192                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3193                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3194                         self.logger.log(msg, short_msg=short_msg)
3195
3196                         if self.background:
3197                                 fetcher.addExitListener(self._fetcher_exit)
3198                                 self._current_task = fetcher
3199                                 self.scheduler.fetch.schedule(fetcher)
3200                         else:
3201                                 self._start_task(fetcher, self._fetcher_exit)
3202                         return
3203
3204                 self._fetcher_exit(fetcher)
3205
3206         def _fetcher_exit(self, fetcher):
3207
3208                 # The fetcher only has a returncode when
3209                 # --getbinpkg is enabled.
3210                 if fetcher.returncode is not None:
3211                         self._fetched_pkg = True
3212                         if self.opts.fetchonly:
3213                                 self._final_exit(fetcher)
3214                                 self.wait()
3215                                 return
3216                         elif self._default_exit(fetcher) != os.EX_OK:
3217                                 self.wait()
3218                                 return
3219
3220                 verifier = None
3221                 if self._verify:
3222                         verifier = BinpkgVerifier(background=self.background,
3223                                 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3224
3225                         if self.background:
3226                                 verifier.addExitListener(self._verifier_exit)
3227                                 self._current_task = verifier
3228                                 self.scheduler.fetch.schedule(verifier)
3229                         else:
3230                                 self._start_task(verifier, self._verifier_exit)
3231                         return
3232
3233                 self._verifier_exit(verifier)
3234
3235         def _verifier_exit(self, verifier):
3236                 if verifier is not None and \
3237                         self._default_exit(verifier) != os.EX_OK:
3238                         self.wait()
3239                         return
3240
3241                 logger = self.logger
3242                 pkg = self.pkg
3243                 pkg_count = self.pkg_count
3244                 pkg_path = self._pkg_path
3245
3246                 if self._fetched_pkg:
3247                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3248
3249                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3250                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3251                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3252                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3253                 logger.log(msg, short_msg=short_msg)
3254
3255                 self._build_dir.lock()
3256
3257                 phase = "clean"
3258                 settings = self.settings
3259                 settings.setcpv(pkg)
3260                 settings["EBUILD"] = self._ebuild_path
3261                 ebuild_phase = EbuildPhase(background=self.background,
3262                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3263                         settings=settings, tree=self._tree)
3264
3265                 self._start_task(ebuild_phase, self._clean_exit)
3266
3267         def _clean_exit(self, clean_phase):
3268                 if self._default_exit(clean_phase) != os.EX_OK:
3269                         self._unlock_builddir()
3270                         self.wait()
3271                         return
3272
3273                 dir_path = self._build_dir.dir_path
3274
3275                 try:
3276                         shutil.rmtree(dir_path)
3277                 except (IOError, OSError), e:
3278                         if e.errno != errno.ENOENT:
3279                                 raise
3280                         del e
3281
3282                 infloc = self._infloc
3283                 pkg = self.pkg
3284                 pkg_path = self._pkg_path
3285
3286                 dir_mode = 0755
3287                 for mydir in (dir_path, self._image_dir, infloc):
3288                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3289                                 gid=portage.data.portage_gid, mode=dir_mode)
3290
3291                 # This initializes PORTAGE_LOG_FILE.
3292                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3293                 self._writemsg_level(">>> Extracting info\n")
3294
3295                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3296                 check_missing_metadata = ("CATEGORY", "PF")
3297                 missing_metadata = set()
3298                 for k in check_missing_metadata:
3299                         v = pkg_xpak.getfile(k)
3300                         if not v:
3301                                 missing_metadata.add(k)
3302
3303                 pkg_xpak.unpackinfo(infloc)
3304                 for k in missing_metadata:
3305                         if k == "CATEGORY":
3306                                 v = pkg.category
3307                         elif k == "PF":
3308                                 v = pkg.pf
3309                         else:
3310                                 continue
3311
3312                         f = open(os.path.join(infloc, k), 'wb')
3313                         try:
3314                                 f.write(v + "\n")
3315                         finally:
3316                                 f.close()
3317
3318                 # Store the md5sum in the vdb.
3319                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3320                 try:
3321                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3322                 finally:
3323                         f.close()
3324
3325                 # This gives bashrc users an opportunity to do various things
3326                 # such as remove binary packages after they're installed.
3327                 settings = self.settings
3328                 settings.setcpv(self.pkg)
3329                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3330                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3331
3332                 phase = "setup"
3333                 setup_phase = EbuildPhase(background=self.background,
3334                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3335                         settings=settings, tree=self._tree)
3336
3337                 setup_phase.addExitListener(self._setup_exit)
3338                 self._current_task = setup_phase
3339                 self.scheduler.scheduleSetup(setup_phase)
3340
3341         def _setup_exit(self, setup_phase):
3342                 if self._default_exit(setup_phase) != os.EX_OK:
3343                         self._unlock_builddir()
3344                         self.wait()
3345                         return
3346
3347                 extractor = BinpkgExtractorAsync(background=self.background,
3348                         image_dir=self._image_dir,
3349                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3350                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3351                 self._start_task(extractor, self._extractor_exit)
3352
3353         def _extractor_exit(self, extractor):
3354                 if self._final_exit(extractor) != os.EX_OK:
3355                         self._unlock_builddir()
3356                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3357                                 noiselevel=-1)
3358                 self.wait()
3359
3360         def _unlock_builddir(self):
3361                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3362                 self._build_dir.unlock()
3363
3364         def install(self):
3365
3366                 # This gives bashrc users an opportunity to do various things
3367                 # such as remove binary packages after they're installed.
3368                 settings = self.settings
3369                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3370                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3371
3372                 merge = EbuildMerge(find_blockers=self.find_blockers,
3373                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3374                         pkg=self.pkg, pkg_count=self.pkg_count,
3375                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3376                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3377
3378                 try:
3379                         retval = merge.execute()
3380                 finally:
3381                         settings.pop("PORTAGE_BINPKG_FILE", None)
3382                         self._unlock_builddir()
3383                 return retval
3384
3385 class BinpkgFetcher(SpawnProcess):
3386
3387         __slots__ = ("pkg",
3388                 "locked", "pkg_path", "_lock_obj")
3389
3390         def __init__(self, **kwargs):
3391                 SpawnProcess.__init__(self, **kwargs)
3392                 pkg = self.pkg
3393                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3394
3395         def _start(self):
3396
3397                 if self.cancelled:
3398                         return
3399
3400                 pkg = self.pkg
3401                 bintree = pkg.root_config.trees["bintree"]
3402                 settings = bintree.settings
3403                 use_locks = "distlocks" in settings.features
3404                 pkg_path = self.pkg_path
3405                 resume = os.path.exists(pkg_path)
3406
3407                 # urljoin doesn't work correctly with
3408                 # unrecognized protocols like sftp
3409                 if bintree._remote_has_index:
3410                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3411                         if not rel_uri:
3412                                 rel_uri = pkg.cpv + ".tbz2"
3413                         uri = bintree._remote_base_uri.rstrip("/") + \
3414                                 "/" + rel_uri.lstrip("/")
3415                 else:
3416                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3417                                 "/" + pkg.pf + ".tbz2"
3418
3419                 protocol = urlparse.urlparse(uri)[0]
3420                 fcmd_prefix = "FETCHCOMMAND"
3421                 if resume:
3422                         fcmd_prefix = "RESUMECOMMAND"
3423                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3424                 if not fcmd:
3425                         fcmd = settings.get(fcmd_prefix)
3426
3427                 fcmd_vars = {
3428                         "DISTDIR" : os.path.dirname(pkg_path),
3429                         "URI"     : uri,
3430                         "FILE"    : os.path.basename(pkg_path)
3431                 }
3432
3433                 fetch_env = dict(settings.iteritems())
3434                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3435                         for x in shlex.split(fcmd)]
3436
3437                 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3438                 if use_locks:
3439                         self.lock()
3440
3441                 if self.fd_pipes is None:
3442                         self.fd_pipes = {}
3443                 fd_pipes = self.fd_pipes
3444
3445                 # Redirect all output to stdout since some fetchers like
3446                 # wget pollute stderr (if portage detects a problem then it
3447                 # can send it's own message to stderr).
3448                 fd_pipes.setdefault(0, sys.stdin.fileno())
3449                 fd_pipes.setdefault(1, sys.stdout.fileno())
3450                 fd_pipes.setdefault(2, sys.stdout.fileno())
3451
3452                 self.args = fetch_args
3453                 self.env = fetch_env
3454                 SpawnProcess._start(self)
3455
3456         def _set_returncode(self, wait_retval):
3457                 SpawnProcess._set_returncode(self, wait_retval)
3458                 if self.locked:
3459                         self.unlock()
3460
3461         def lock(self):
3462                 """
3463                 This raises an AlreadyLocked exception if lock() is called
3464                 while a lock is already held. In order to avoid this, call
3465                 unlock() or check whether the "locked" attribute is True
3466                 or False before calling lock().
3467                 """
3468                 if self._lock_obj is not None:
3469                         raise self.AlreadyLocked((self._lock_obj,))
3470
3471                 self._lock_obj = portage.locks.lockfile(
3472                         self.pkg_path, wantnewlockfile=1)
3473                 self.locked = True
3474
3475         class AlreadyLocked(portage.exception.PortageException):
3476                 pass
3477
3478         def unlock(self):
3479                 if self._lock_obj is None:
3480                         return
3481                 portage.locks.unlockfile(self._lock_obj)
3482                 self._lock_obj = None
3483                 self.locked = False
3484
3485 class BinpkgVerifier(AsynchronousTask):
3486         __slots__ = ("logfile", "pkg",)
3487
3488         def _start(self):
3489                 """
3490                 Note: Unlike a normal AsynchronousTask.start() method,
3491                 this one does all work is synchronously. The returncode
3492                 attribute will be set before it returns.
3493                 """
3494
3495                 pkg = self.pkg
3496                 root_config = pkg.root_config
3497                 bintree = root_config.trees["bintree"]
3498                 rval = os.EX_OK
3499                 stdout_orig = sys.stdout
3500                 stderr_orig = sys.stderr
3501                 log_file = None
3502                 if self.background and self.logfile is not None:
3503                         log_file = open(self.logfile, 'a')
3504                 try:
3505                         if log_file is not None:
3506                                 sys.stdout = log_file
3507                                 sys.stderr = log_file
3508                         try:
3509                                 bintree.digestCheck(pkg)
3510                         except portage.exception.FileNotFound:
3511                                 writemsg("!!! Fetching Binary failed " + \
3512                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3513                                 rval = 1
3514                         except portage.exception.DigestException, e:
3515                                 writemsg("\n!!! Digest verification failed:\n",
3516                                         noiselevel=-1)
3517                                 writemsg("!!! %s\n" % e.value[0],
3518                                         noiselevel=-1)
3519                                 writemsg("!!! Reason: %s\n" % e.value[1],
3520                                         noiselevel=-1)
3521                                 writemsg("!!! Got: %s\n" % e.value[2],
3522                                         noiselevel=-1)
3523                                 writemsg("!!! Expected: %s\n" % e.value[3],
3524                                         noiselevel=-1)
3525                                 rval = 1
3526                 finally:
3527                         sys.stdout = stdout_orig
3528                         sys.stderr = stderr_orig
3529                         if log_file is not None:
3530                                 log_file.close()
3531
3532                 self.returncode = rval
3533                 self.wait()
3534
3535 class BinpkgExtractorAsync(SpawnProcess):
3536
3537         __slots__ = ("image_dir", "pkg", "pkg_path")
3538
3539         _shell_binary = portage.const.BASH_BINARY
3540
3541         def _start(self):
3542                 self.args = [self._shell_binary, "-c",
3543                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3544                         (portage._shell_quote(self.pkg_path),
3545                         portage._shell_quote(self.image_dir))]
3546
3547                 self.env = self.pkg.root_config.settings.environ()
3548                 SpawnProcess._start(self)
3549
3550 class MergeListItem(CompositeTask):
3551
3552         """
3553         TODO: For parallel scheduling, everything here needs asynchronous
3554         execution support (start, poll, and wait methods).
3555         """
3556
3557         __slots__ = ("args_set",
3558                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3559                 "find_blockers", "logger", "mtimedb", "pkg",
3560                 "pkg_count", "pkg_to_replace", "prefetcher",
3561                 "settings", "statusMessage", "world_atom") + \
3562                 ("_install_task",)
3563
3564         def _start(self):
3565
3566                 pkg = self.pkg
3567                 build_opts = self.build_opts
3568
3569                 if pkg.installed:
3570                         # uninstall,  executed by self.merge()
3571                         self.returncode = os.EX_OK
3572                         self.wait()
3573                         return
3574
3575                 args_set = self.args_set
3576                 find_blockers = self.find_blockers
3577                 logger = self.logger
3578                 mtimedb = self.mtimedb
3579                 pkg_count = self.pkg_count
3580                 scheduler = self.scheduler
3581                 settings = self.settings
3582                 world_atom = self.world_atom
3583                 ldpath_mtimes = mtimedb["ldpath"]
3584
3585                 action_desc = "Emerging"
3586                 preposition = "for"
3587                 if pkg.type_name == "binary":
3588                         action_desc += " binary"
3589
3590                 if build_opts.fetchonly:
3591                         action_desc = "Fetching"
3592
3593                 msg = "%s (%s of %s) %s" % \
3594                         (action_desc,
3595                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3596                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3597                         colorize("GOOD", pkg.cpv))
3598
3599                 if pkg.root != "/":
3600                         msg += " %s %s" % (preposition, pkg.root)
3601
3602                 if not build_opts.pretend:
3603                         self.statusMessage(msg)
3604                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3605                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3606
3607                 if pkg.type_name == "ebuild":
3608
3609                         build = EbuildBuild(args_set=args_set,
3610                                 background=self.background,
3611                                 config_pool=self.config_pool,
3612                                 find_blockers=find_blockers,
3613                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3614                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3615                                 prefetcher=self.prefetcher, scheduler=scheduler,
3616                                 settings=settings, world_atom=world_atom)
3617
3618                         self._install_task = build
3619                         self._start_task(build, self._default_final_exit)
3620                         return
3621
3622                 elif pkg.type_name == "binary":
3623
3624                         binpkg = Binpkg(background=self.background,
3625                                 find_blockers=find_blockers,
3626                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3627                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3628                                 prefetcher=self.prefetcher, settings=settings,
3629                                 scheduler=scheduler, world_atom=world_atom)
3630
3631                         self._install_task = binpkg
3632                         self._start_task(binpkg, self._default_final_exit)
3633                         return
3634
3635         def _poll(self):
3636                 self._install_task.poll()
3637                 return self.returncode
3638
3639         def _wait(self):
3640                 self._install_task.wait()
3641                 return self.returncode
3642
3643         def merge(self):
3644
3645                 pkg = self.pkg
3646                 build_opts = self.build_opts
3647                 find_blockers = self.find_blockers
3648                 logger = self.logger
3649                 mtimedb = self.mtimedb
3650                 pkg_count = self.pkg_count
3651                 prefetcher = self.prefetcher
3652                 scheduler = self.scheduler
3653                 settings = self.settings
3654                 world_atom = self.world_atom
3655                 ldpath_mtimes = mtimedb["ldpath"]
3656
3657                 if pkg.installed:
3658                         if not (build_opts.buildpkgonly or \
3659                                 build_opts.fetchonly or build_opts.pretend):
3660
3661                                 uninstall = PackageUninstall(background=self.background,
3662                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3663                                         pkg=pkg, scheduler=scheduler, settings=settings)
3664
3665                                 uninstall.start()
3666                                 retval = uninstall.wait()
3667                                 if retval != os.EX_OK:
3668                                         return retval
3669                         return os.EX_OK
3670
3671                 if build_opts.fetchonly or \
3672                         build_opts.buildpkgonly:
3673                         return self.returncode
3674
3675                 retval = self._install_task.install()
3676                 return retval
3677
3678 class PackageMerge(AsynchronousTask):
3679         """
3680         TODO: Implement asynchronous merge so that the scheduler can
3681         run while a merge is executing.
3682         """
3683
3684         __slots__ = ("merge",)
3685
3686         def _start(self):
3687
3688                 pkg = self.merge.pkg
3689                 pkg_count = self.merge.pkg_count
3690
3691                 if pkg.installed:
3692                         action_desc = "Uninstalling"
3693                         preposition = "from"
3694                 else:
3695                         action_desc = "Installing"
3696                         preposition = "to"
3697
3698                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3699
3700                 if pkg.root != "/":
3701                         msg += " %s %s" % (preposition, pkg.root)
3702
3703                 if not self.merge.build_opts.fetchonly and \
3704                         not self.merge.build_opts.pretend and \
3705                         not self.merge.build_opts.buildpkgonly:
3706                         self.merge.statusMessage(msg)
3707
3708                 self.returncode = self.merge.merge()
3709                 self.wait()
3710
3711 class DependencyArg(object):
3712         def __init__(self, arg=None, root_config=None):
3713                 self.arg = arg
3714                 self.root_config = root_config
3715
3716         def __str__(self):
3717                 return self.arg
3718
3719 class AtomArg(DependencyArg):
3720         def __init__(self, atom=None, **kwargs):
3721                 DependencyArg.__init__(self, **kwargs)
3722                 self.atom = atom
3723                 if not isinstance(self.atom, portage.dep.Atom):
3724                         self.atom = portage.dep.Atom(self.atom)
3725                 self.set = (self.atom, )
3726
3727 class PackageArg(DependencyArg):
3728         def __init__(self, package=None, **kwargs):
3729                 DependencyArg.__init__(self, **kwargs)
3730                 self.package = package
3731                 self.atom = portage.dep.Atom("=" + package.cpv)
3732                 self.set = (self.atom, )
3733
3734 class SetArg(DependencyArg):
3735         def __init__(self, set=None, **kwargs):
3736                 DependencyArg.__init__(self, **kwargs)
3737                 self.set = set
3738                 self.name = self.arg[len(SETPREFIX):]
3739
3740 class Dependency(SlotObject):
3741         __slots__ = ("atom", "blocker", "depth",
3742                 "parent", "onlydeps", "priority", "root")
3743         def __init__(self, **kwargs):
3744                 SlotObject.__init__(self, **kwargs)
3745                 if self.priority is None:
3746                         self.priority = DepPriority()
3747                 if self.depth is None:
3748                         self.depth = 0
3749
3750 class BlockerCache(DictMixin):
3751         """This caches blockers of installed packages so that dep_check does not
3752         have to be done for every single installed package on every invocation of
3753         emerge.  The cache is invalidated whenever it is detected that something
3754         has changed that might alter the results of dep_check() calls:
3755                 1) the set of installed packages (including COUNTER) has changed
3756                 2) the old-style virtuals have changed
3757         """
3758
3759         # Number of uncached packages to trigger cache update, since
3760         # it's wasteful to update it for every vdb change.
3761         _cache_threshold = 5
3762
3763         class BlockerData(object):
3764
3765                 __slots__ = ("__weakref__", "atoms", "counter")
3766
3767                 def __init__(self, counter, atoms):
3768                         self.counter = counter
3769                         self.atoms = atoms
3770
3771         def __init__(self, myroot, vardb):
3772                 self._vardb = vardb
3773                 self._virtuals = vardb.settings.getvirtuals()
3774                 self._cache_filename = os.path.join(myroot,
3775                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3776                 self._cache_version = "1"
3777                 self._cache_data = None
3778                 self._modified = set()
3779                 self._load()
3780
3781         def _load(self):
3782                 try:
3783                         f = open(self._cache_filename)
3784                         mypickle = pickle.Unpickler(f)
3785                         mypickle.find_global = None
3786                         self._cache_data = mypickle.load()
3787                         f.close()
3788                         del f
3789                 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3790                         if isinstance(e, pickle.UnpicklingError):
3791                                 writemsg("!!! Error loading '%s': %s\n" % \
3792                                         (self._cache_filename, str(e)), noiselevel=-1)
3793                         del e
3794
3795                 cache_valid = self._cache_data and \
3796                         isinstance(self._cache_data, dict) and \
3797                         self._cache_data.get("version") == self._cache_version and \
3798                         isinstance(self._cache_data.get("blockers"), dict)
3799                 if cache_valid:
3800                         # Validate all the atoms and counters so that
3801                         # corruption is detected as soon as possible.
3802                         invalid_items = set()
3803                         for k, v in self._cache_data["blockers"].iteritems():
3804                                 if not isinstance(k, basestring):
3805                                         invalid_items.add(k)
3806                                         continue
3807                                 try:
3808                                         if portage.catpkgsplit(k) is None:
3809                                                 invalid_items.add(k)
3810                                                 continue
3811                                 except portage.exception.InvalidData:
3812                                         invalid_items.add(k)
3813                                         continue
3814                                 if not isinstance(v, tuple) or \
3815                                         len(v) != 2:
3816                                         invalid_items.add(k)
3817                                         continue
3818                                 counter, atoms = v
3819                                 if not isinstance(counter, (int, long)):
3820                                         invalid_items.add(k)
3821                                         continue
3822                                 if not isinstance(atoms, (list, tuple)):
3823                                         invalid_items.add(k)
3824                                         continue
3825                                 invalid_atom = False
3826                                 for atom in atoms:
3827                                         if not isinstance(atom, basestring):
3828                                                 invalid_atom = True
3829                                                 break
3830                                         if atom[:1] != "!" or \
3831                                                 not portage.isvalidatom(
3832                                                 atom, allow_blockers=True):
3833                                                 invalid_atom = True
3834                                                 break
3835                                 if invalid_atom:
3836                                         invalid_items.add(k)
3837                                         continue
3838
3839                         for k in invalid_items:
3840                                 del self._cache_data["blockers"][k]
3841                         if not self._cache_data["blockers"]:
3842                                 cache_valid = False
3843
3844                 if not cache_valid:
3845                         self._cache_data = {"version":self._cache_version}
3846                         self._cache_data["blockers"] = {}
3847                         self._cache_data["virtuals"] = self._virtuals
3848                 self._modified.clear()
3849
3850         def flush(self):
3851                 """If the current user has permission and the internal blocker cache
3852                 been updated, save it to disk and mark it unmodified.  This is called
3853                 by emerge after it has proccessed blockers for all installed packages.
3854                 Currently, the cache is only written if the user has superuser
3855                 privileges (since that's required to obtain a lock), but all users
3856                 have read access and benefit from faster blocker lookups (as long as
3857                 the entire cache is still valid).  The cache is stored as a pickled
3858                 dict object with the following format:
3859
3860                 {
3861                         version : "1",
3862                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
3863                         "virtuals" : vardb.settings.getvirtuals()
3864                 }
3865                 """
3866                 if len(self._modified) >= self._cache_threshold and \
3867                         secpass >= 2:
3868                         try:
3869                                 f = portage.util.atomic_ofstream(self._cache_filename)
3870                                 pickle.dump(self._cache_data, f, -1)
3871                                 f.close()
3872                                 portage.util.apply_secpass_permissions(
3873                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
3874                         except (IOError, OSError), e:
3875                                 pass
3876                         self._modified.clear()
3877
3878         def __setitem__(self, cpv, blocker_data):
3879                 """
3880                 Update the cache and mark it as modified for a future call to
3881                 self.flush().
3882
3883                 @param cpv: Package for which to cache blockers.
3884                 @type cpv: String
3885                 @param blocker_data: An object with counter and atoms attributes.
3886                 @type blocker_data: BlockerData
3887                 """
3888                 self._cache_data["blockers"][cpv] = \
3889                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
3890                 self._modified.add(cpv)
3891
3892         def __iter__(self):
3893                 return iter(self._cache_data["blockers"])
3894
3895         def __delitem__(self, cpv):
3896                 del self._cache_data["blockers"][cpv]
3897
3898         def __getitem__(self, cpv):
3899                 """
3900                 @rtype: BlockerData
3901                 @returns: An object with counter and atoms attributes.
3902                 """
3903                 return self.BlockerData(*self._cache_data["blockers"][cpv])
3904
3905         def keys(self):
3906                 """This needs to be implemented so that self.__repr__() doesn't raise
3907                 an AttributeError."""
3908                 return list(self)
3909
3910 class BlockerDB(object):
3911
3912         def __init__(self, root_config):
3913                 self._root_config = root_config
3914                 self._vartree = root_config.trees["vartree"]
3915                 self._portdb = root_config.trees["porttree"].dbapi
3916
3917                 self._dep_check_trees = None
3918                 self._fake_vartree = None
3919
3920         def _get_fake_vartree(self, acquire_lock=0):
3921                 fake_vartree = self._fake_vartree
3922                 if fake_vartree is None:
3923                         fake_vartree = FakeVartree(self._root_config,
3924                                 acquire_lock=acquire_lock)
3925                         self._fake_vartree = fake_vartree
3926                         self._dep_check_trees = { self._vartree.root : {
3927                                 "porttree"    :  fake_vartree,
3928                                 "vartree"     :  fake_vartree,
3929                         }}
3930                 else:
3931                         fake_vartree.sync(acquire_lock=acquire_lock)
3932                 return fake_vartree
3933
3934         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
3935                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
3936                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3937                 settings = self._vartree.settings
3938                 stale_cache = set(blocker_cache)
3939                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
3940                 dep_check_trees = self._dep_check_trees
3941                 vardb = fake_vartree.dbapi
3942                 installed_pkgs = list(vardb)
3943
3944                 for inst_pkg in installed_pkgs:
3945                         stale_cache.discard(inst_pkg.cpv)
3946                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
3947                         if cached_blockers is not None and \
3948                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
3949                                 cached_blockers = None
3950                         if cached_blockers is not None:
3951                                 blocker_atoms = cached_blockers.atoms
3952                         else:
3953                                 # Use aux_get() to trigger FakeVartree global
3954                                 # updates on *DEPEND when appropriate.
3955                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
3956                                 try:
3957                                         portage.dep._dep_check_strict = False
3958                                         success, atoms = portage.dep_check(depstr,
3959                                                 vardb, settings, myuse=inst_pkg.use.enabled,
3960                                                 trees=dep_check_trees, myroot=inst_pkg.root)
3961                                 finally:
3962                                         portage.dep._dep_check_strict = True
3963                                 if not success:
3964                                         pkg_location = os.path.join(inst_pkg.root,
3965                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
3966                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
3967                                                 (pkg_location, atoms), noiselevel=-1)
3968                                         continue
3969
3970                                 blocker_atoms = [atom for atom in atoms \
3971                                         if atom.startswith("!")]
3972                                 blocker_atoms.sort()
3973                                 counter = long(inst_pkg.metadata["COUNTER"])
3974                                 blocker_cache[inst_pkg.cpv] = \
3975                                         blocker_cache.BlockerData(counter, blocker_atoms)
3976                 for cpv in stale_cache:
3977                         del blocker_cache[cpv]
3978                 blocker_cache.flush()
3979
3980                 blocker_parents = digraph()
3981                 blocker_atoms = []
3982                 for pkg in installed_pkgs:
3983                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
3984                                 blocker_atom = blocker_atom.lstrip("!")
3985                                 blocker_atoms.append(blocker_atom)
3986                                 blocker_parents.add(blocker_atom, pkg)
3987
3988                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3989                 blocking_pkgs = set()
3990                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
3991                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
3992
3993                 # Check for blockers in the other direction.
3994                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
3995                 try:
3996                         portage.dep._dep_check_strict = False
3997                         success, atoms = portage.dep_check(depstr,
3998                                 vardb, settings, myuse=new_pkg.use.enabled,
3999                                 trees=dep_check_trees, myroot=new_pkg.root)
4000                 finally:
4001                         portage.dep._dep_check_strict = True
4002                 if not success:
4003                         # We should never get this far with invalid deps.
4004                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4005                         assert False
4006
4007                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4008                         if atom[:1] == "!"]
4009                 if blocker_atoms:
4010                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4011                         for inst_pkg in installed_pkgs:
4012                                 try:
4013                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4014                                 except (portage.exception.InvalidDependString, StopIteration):
4015                                         continue
4016                                 blocking_pkgs.add(inst_pkg)
4017
4018                 return blocking_pkgs
4019
4020 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4021
4022         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4023                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4024         p_type, p_root, p_key, p_status = parent_node
4025         msg = []
4026         if p_status == "nomerge":
4027                 category, pf = portage.catsplit(p_key)
4028                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4029                 msg.append("Portage is unable to process the dependencies of the ")
4030                 msg.append("'%s' package. " % p_key)
4031                 msg.append("In order to correct this problem, the package ")
4032                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4033                 msg.append("As a temporary workaround, the --nodeps option can ")
4034                 msg.append("be used to ignore all dependencies.  For reference, ")
4035                 msg.append("the problematic dependencies can be found in the ")
4036                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4037         else:
4038                 msg.append("This package can not be installed. ")
4039                 msg.append("Please notify the '%s' package maintainer " % p_key)
4040                 msg.append("about this problem.")
4041
4042         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4043         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4044
4045 class PackageVirtualDbapi(portage.dbapi):
4046         """
4047         A dbapi-like interface class that represents the state of the installed
4048         package database as new packages are installed, replacing any packages
4049         that previously existed in the same slot. The main difference between
4050         this class and fakedbapi is that this one uses Package instances
4051         internally (passed in via cpv_inject() and cpv_remove() calls).
4052         """
4053         def __init__(self, settings):
4054                 portage.dbapi.__init__(self)
4055                 self.settings = settings
4056                 self._match_cache = {}
4057                 self._cp_map = {}
4058                 self._cpv_map = {}
4059
4060         def clear(self):
4061                 """
4062                 Remove all packages.
4063                 """
4064                 if self._cpv_map:
4065                         self._clear_cache()
4066                         self._cp_map.clear()
4067                         self._cpv_map.clear()
4068
4069         def copy(self):
4070                 obj = PackageVirtualDbapi(self.settings)
4071                 obj._match_cache = self._match_cache.copy()
4072                 obj._cp_map = self._cp_map.copy()
4073                 for k, v in obj._cp_map.iteritems():
4074                         obj._cp_map[k] = v[:]
4075                 obj._cpv_map = self._cpv_map.copy()
4076                 return obj
4077
4078         def __iter__(self):
4079                 return self._cpv_map.itervalues()
4080
4081         def __contains__(self, item):
4082                 existing = self._cpv_map.get(item.cpv)
4083                 if existing is not None and \
4084                         existing == item:
4085                         return True
4086                 return False
4087
4088         def get(self, item, default=None):
4089                 cpv = getattr(item, "cpv", None)
4090                 if cpv is None:
4091                         if len(item) != 4:
4092                                 return default
4093                         type_name, root, cpv, operation = item
4094
4095                 existing = self._cpv_map.get(cpv)
4096                 if existing is not None and \
4097                         existing == item:
4098                         return existing
4099                 return default
4100
4101         def match_pkgs(self, atom):
4102                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4103
4104         def _clear_cache(self):
4105                 if self._categories is not None:
4106                         self._categories = None
4107                 if self._match_cache:
4108                         self._match_cache = {}
4109
4110         def match(self, origdep, use_cache=1):
4111                 result = self._match_cache.get(origdep)
4112                 if result is not None:
4113                         return result[:]
4114                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4115                 self._match_cache[origdep] = result
4116                 return result[:]
4117
4118         def cpv_exists(self, cpv):
4119                 return cpv in self._cpv_map
4120
4121         def cp_list(self, mycp, use_cache=1):
4122                 cachelist = self._match_cache.get(mycp)
4123                 # cp_list() doesn't expand old-style virtuals
4124                 if cachelist and cachelist[0].startswith(mycp):
4125                         return cachelist[:]
4126                 cpv_list = self._cp_map.get(mycp)
4127                 if cpv_list is None:
4128                         cpv_list = []
4129                 else:
4130                         cpv_list = [pkg.cpv for pkg in cpv_list]
4131                 self._cpv_sort_ascending(cpv_list)
4132                 if not (not cpv_list and mycp.startswith("virtual/")):
4133                         self._match_cache[mycp] = cpv_list
4134                 return cpv_list[:]
4135
4136         def cp_all(self):
4137                 return list(self._cp_map)
4138
4139         def cpv_all(self):
4140                 return list(self._cpv_map)
4141
4142         def cpv_inject(self, pkg):
4143                 cp_list = self._cp_map.get(pkg.cp)
4144                 if cp_list is None:
4145                         cp_list = []
4146                         self._cp_map[pkg.cp] = cp_list
4147                 e_pkg = self._cpv_map.get(pkg.cpv)
4148                 if e_pkg is not None:
4149                         if e_pkg == pkg:
4150                                 return
4151                         self.cpv_remove(e_pkg)
4152                 for e_pkg in cp_list:
4153                         if e_pkg.slot_atom == pkg.slot_atom:
4154                                 if e_pkg == pkg:
4155                                         return
4156                                 self.cpv_remove(e_pkg)
4157                                 break
4158                 cp_list.append(pkg)
4159                 self._cpv_map[pkg.cpv] = pkg
4160                 self._clear_cache()
4161
4162         def cpv_remove(self, pkg):
4163                 old_pkg = self._cpv_map.get(pkg.cpv)
4164                 if old_pkg != pkg:
4165                         raise KeyError(pkg)
4166                 self._cp_map[pkg.cp].remove(pkg)
4167                 del self._cpv_map[pkg.cpv]
4168                 self._clear_cache()
4169
4170         def aux_get(self, cpv, wants):
4171                 metadata = self._cpv_map[cpv].metadata
4172                 return [metadata.get(x, "") for x in wants]
4173
4174         def aux_update(self, cpv, values):
4175                 self._cpv_map[cpv].metadata.update(values)
4176                 self._clear_cache()
4177
4178 class depgraph(object):
4179
4180         pkg_tree_map = RootConfig.pkg_tree_map
4181
4182         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4183
4184         def __init__(self, settings, trees, myopts, myparams, spinner):
4185                 self.settings = settings
4186                 self.target_root = settings["ROOT"]
4187                 self.myopts = myopts
4188                 self.myparams = myparams
4189                 self.edebug = 0
4190                 if settings.get("PORTAGE_DEBUG", "") == "1":
4191                         self.edebug = 1
4192                 self.spinner = spinner
4193                 self._running_root = trees["/"]["root_config"]
4194                 self._opts_no_restart = Scheduler._opts_no_restart
4195                 self.pkgsettings = {}
4196                 # Maps slot atom to package for each Package added to the graph.
4197                 self._slot_pkg_map = {}
4198                 # Maps nodes to the reasons they were selected for reinstallation.
4199                 self._reinstall_nodes = {}
4200                 self.mydbapi = {}
4201                 self.trees = {}
4202                 self._trees_orig = trees
4203                 self.roots = {}
4204                 # Contains a filtered view of preferred packages that are selected
4205                 # from available repositories.
4206                 self._filtered_trees = {}
4207                 # Contains installed packages and new packages that have been added
4208                 # to the graph.
4209                 self._graph_trees = {}
4210                 # All Package instances
4211                 self._pkg_cache = self._package_cache(self)
4212                 for myroot in trees:
4213                         self.trees[myroot] = {}
4214                         # Create a RootConfig instance that references
4215                         # the FakeVartree instead of the real one.
4216                         self.roots[myroot] = RootConfig(
4217                                 trees[myroot]["vartree"].settings,
4218                                 self.trees[myroot],
4219                                 trees[myroot]["root_config"].setconfig)
4220                         for tree in ("porttree", "bintree"):
4221                                 self.trees[myroot][tree] = trees[myroot][tree]
4222                         self.trees[myroot]["vartree"] = \
4223                                 FakeVartree(trees[myroot]["root_config"],
4224                                         pkg_cache=self._pkg_cache)
4225                         self.pkgsettings[myroot] = portage.config(
4226                                 clone=self.trees[myroot]["vartree"].settings)
4227                         self._slot_pkg_map[myroot] = {}
4228                         vardb = self.trees[myroot]["vartree"].dbapi
4229                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4230                                 "--buildpkgonly" not in self.myopts
4231                         # This fakedbapi instance will model the state that the vdb will
4232                         # have after new packages have been installed.
4233                         fakedb = PackageVirtualDbapi(vardb.settings)
4234                         if preload_installed_pkgs:
4235                                 for pkg in vardb:
4236                                         self.spinner.update()
4237                                         # This triggers metadata updates via FakeVartree.
4238                                         vardb.aux_get(pkg.cpv, [])
4239                                         fakedb.cpv_inject(pkg)
4240
4241                         # Now that the vardb state is cached in our FakeVartree,
4242                         # we won't be needing the real vartree cache for awhile.
4243                         # To make some room on the heap, clear the vardbapi
4244                         # caches.
4245                         trees[myroot]["vartree"].dbapi._clear_cache()
4246                         gc.collect()
4247
4248                         self.mydbapi[myroot] = fakedb
4249                         def graph_tree():
4250                                 pass
4251                         graph_tree.dbapi = fakedb
4252                         self._graph_trees[myroot] = {}
4253                         self._filtered_trees[myroot] = {}
4254                         # Substitute the graph tree for the vartree in dep_check() since we
4255                         # want atom selections to be consistent with package selections
4256                         # have already been made.
4257                         self._graph_trees[myroot]["porttree"]   = graph_tree
4258                         self._graph_trees[myroot]["vartree"]    = graph_tree
4259                         def filtered_tree():
4260                                 pass
4261                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4262                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4263
4264                         # Passing in graph_tree as the vartree here could lead to better
4265                         # atom selections in some cases by causing atoms for packages that
4266                         # have been added to the graph to be preferred over other choices.
4267                         # However, it can trigger atom selections that result in
4268                         # unresolvable direct circular dependencies. For example, this
4269                         # happens with gwydion-dylan which depends on either itself or
4270                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4271                         # gwydion-dylan-bin needs to be selected in order to avoid a
4272                         # an unresolvable direct circular dependency.
4273                         #
4274                         # To solve the problem described above, pass in "graph_db" so that
4275                         # packages that have been added to the graph are distinguishable
4276                         # from other available packages and installed packages. Also, pass
4277                         # the parent package into self._select_atoms() calls so that
4278                         # unresolvable direct circular dependencies can be detected and
4279                         # avoided when possible.
4280                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4281                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4282
4283                         dbs = []
4284                         portdb = self.trees[myroot]["porttree"].dbapi
4285                         bindb  = self.trees[myroot]["bintree"].dbapi
4286                         vardb  = self.trees[myroot]["vartree"].dbapi
4287                         #               (db, pkg_type, built, installed, db_keys)
4288                         if "--usepkgonly" not in self.myopts:
4289                                 db_keys = list(portdb._aux_cache_keys)
4290                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4291                         if "--usepkg" in self.myopts:
4292                                 db_keys = list(bindb._aux_cache_keys)
4293                                 dbs.append((bindb,  "binary", True, False, db_keys))
4294                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4295                         dbs.append((vardb, "installed", True, True, db_keys))
4296                         self._filtered_trees[myroot]["dbs"] = dbs
4297                         if "--usepkg" in self.myopts:
4298                                 self.trees[myroot]["bintree"].populate(
4299                                         "--getbinpkg" in self.myopts,
4300                                         "--getbinpkgonly" in self.myopts)
4301                 del trees
4302
4303                 self.digraph=portage.digraph()
4304                 # contains all sets added to the graph
4305                 self._sets = {}
4306                 # contains atoms given as arguments
4307                 self._sets["args"] = InternalPackageSet()
4308                 # contains all atoms from all sets added to the graph, including
4309                 # atoms given as arguments
4310                 self._set_atoms = InternalPackageSet()
4311                 self._atom_arg_map = {}
4312                 # contains all nodes pulled in by self._set_atoms
4313                 self._set_nodes = set()
4314                 # Contains only Blocker -> Uninstall edges
4315                 self._blocker_uninstalls = digraph()
4316                 # Contains only Package -> Blocker edges
4317                 self._blocker_parents = digraph()
4318                 # Contains only irrelevant Package -> Blocker edges
4319                 self._irrelevant_blockers = digraph()
4320                 # Contains only unsolvable Package -> Blocker edges
4321                 self._unsolvable_blockers = digraph()
4322                 self._slot_collision_info = {}
4323                 # Slot collision nodes are not allowed to block other packages since
4324                 # blocker validation is only able to account for one package per slot.
4325                 self._slot_collision_nodes = set()
4326                 self._parent_atoms = {}
4327                 self._slot_conflict_parent_atoms = set()
4328                 self._serialized_tasks_cache = None
4329                 self._scheduler_graph = None
4330                 self._displayed_list = None
4331                 self._pprovided_args = []
4332                 self._missing_args = []
4333                 self._masked_installed = set()
4334                 self._unsatisfied_deps_for_display = []
4335                 self._unsatisfied_blockers_for_display = None
4336                 self._circular_deps_for_display = None
4337                 self._dep_stack = []
4338                 self._unsatisfied_deps = []
4339                 self._initially_unsatisfied_deps = []
4340                 self._ignored_deps = []
4341                 self._required_set_names = set(["system", "world"])
4342                 self._select_atoms = self._select_atoms_highest_available
4343                 self._select_package = self._select_pkg_highest_available
4344                 self._highest_pkg_cache = {}
4345
4346         def _show_slot_collision_notice(self):
4347                 """Show an informational message advising the user to mask one of the
4348                 the packages. In some cases it may be possible to resolve this
4349                 automatically, but support for backtracking (removal nodes that have
4350                 already been selected) will be required in order to handle all possible
4351                 cases.
4352                 """
4353
4354                 if not self._slot_collision_info:
4355                         return
4356
4357                 self._show_merge_list()
4358
4359                 msg = []
4360                 msg.append("\n!!! Multiple package instances within a single " + \
4361                         "package slot have been pulled\n")
4362                 msg.append("!!! into the dependency graph, resulting" + \
4363                         " in a slot conflict:\n\n")
4364                 indent = "  "
4365                 # Max number of parents shown, to avoid flooding the display.
4366                 max_parents = 3
4367                 explanation_columns = 70
4368                 explanations = 0
4369                 for (slot_atom, root), slot_nodes \
4370                         in self._slot_collision_info.iteritems():
4371                         msg.append(str(slot_atom))
4372                         msg.append("\n\n")
4373
4374                         for node in slot_nodes:
4375                                 msg.append(indent)
4376                                 msg.append(str(node))
4377                                 parent_atoms = self._parent_atoms.get(node)
4378                                 if parent_atoms:
4379                                         pruned_list = set()
4380                                         # Prefer conflict atoms over others.
4381                                         for parent_atom in parent_atoms:
4382                                                 if len(pruned_list) >= max_parents:
4383                                                         break
4384                                                 if parent_atom in self._slot_conflict_parent_atoms:
4385                                                         pruned_list.add(parent_atom)
4386
4387                                         # If this package was pulled in by conflict atoms then
4388                                         # show those alone since those are the most interesting.
4389                                         if not pruned_list:
4390                                                 # When generating the pruned list, prefer instances
4391                                                 # of DependencyArg over instances of Package.
4392                                                 for parent_atom in parent_atoms:
4393                                                         if len(pruned_list) >= max_parents:
4394                                                                 break
4395                                                         parent, atom = parent_atom
4396                                                         if isinstance(parent, DependencyArg):
4397                                                                 pruned_list.add(parent_atom)
4398                                                 # Prefer Packages instances that themselves have been
4399                                                 # pulled into collision slots.
4400                                                 for parent_atom in parent_atoms:
4401                                                         if len(pruned_list) >= max_parents:
4402                                                                 break
4403                                                         parent, atom = parent_atom
4404                                                         if isinstance(parent, Package) and \
4405                                                                 (parent.slot_atom, parent.root) \
4406                                                                 in self._slot_collision_info:
4407                                                                 pruned_list.add(parent_atom)
4408                                                 for parent_atom in parent_atoms:
4409                                                         if len(pruned_list) >= max_parents:
4410                                                                 break
4411                                                         pruned_list.add(parent_atom)
4412                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4413                                         parent_atoms = pruned_list
4414                                         msg.append(" pulled in by\n")
4415                                         for parent_atom in parent_atoms:
4416                                                 parent, atom = parent_atom
4417                                                 msg.append(2*indent)
4418                                                 if isinstance(parent,
4419                                                         (PackageArg, AtomArg)):
4420                                                         # For PackageArg and AtomArg types, it's
4421                                                         # redundant to display the atom attribute.
4422                                                         msg.append(str(parent))
4423                                                 else:
4424                                                         # Display the specific atom from SetArg or
4425                                                         # Package types.
4426                                                         msg.append("%s required by %s" % (atom, parent))
4427                                                 msg.append("\n")
4428                                         if omitted_parents:
4429                                                 msg.append(2*indent)
4430                                                 msg.append("(and %d more)\n" % omitted_parents)
4431                                 else:
4432                                         msg.append(" (no parents)\n")
4433                                 msg.append("\n")
4434                         explanation = self._slot_conflict_explanation(slot_nodes)
4435                         if explanation:
4436                                 explanations += 1
4437                                 msg.append(indent + "Explanation:\n\n")
4438                                 for line in textwrap.wrap(explanation, explanation_columns):
4439                                         msg.append(2*indent + line + "\n")
4440                                 msg.append("\n")
4441                 msg.append("\n")
4442                 sys.stderr.write("".join(msg))
4443                 sys.stderr.flush()
4444
4445                 explanations_for_all = explanations == len(self._slot_collision_info)
4446
4447                 if explanations_for_all or "--quiet" in self.myopts:
4448                         return
4449
4450                 msg = []
4451                 msg.append("It may be possible to solve this problem ")
4452                 msg.append("by using package.mask to prevent one of ")
4453                 msg.append("those packages from being selected. ")
4454                 msg.append("However, it is also possible that conflicting ")
4455                 msg.append("dependencies exist such that they are impossible to ")
4456                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4457                 msg.append("the dependencies of two different packages, then those ")
4458                 msg.append("packages can not be installed simultaneously.")
4459
4460                 from formatter import AbstractFormatter, DumbWriter
4461                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4462                 for x in msg:
4463                         f.add_flowing_data(x)
4464                 f.end_paragraph(1)
4465
4466                 msg = []
4467                 msg.append("For more information, see MASKED PACKAGES ")
4468                 msg.append("section in the emerge man page or refer ")
4469                 msg.append("to the Gentoo Handbook.")
4470                 for x in msg:
4471                         f.add_flowing_data(x)
4472                 f.end_paragraph(1)
4473                 f.writer.flush()
4474
4475         def _slot_conflict_explanation(self, slot_nodes):
4476                 """
4477                 When a slot conflict occurs due to USE deps, there are a few
4478                 different cases to consider:
4479
4480                 1) New USE are correctly set but --newuse wasn't requested so an
4481                    installed package with incorrect USE happened to get pulled
4482                    into graph before the new one.
4483
4484                 2) New USE are incorrectly set but an installed package has correct
4485                    USE so it got pulled into the graph, and a new instance also got
4486                    pulled in due to --newuse or an upgrade.
4487
4488                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4489                    and multiple package instances got pulled into the same slot to
4490                    satisfy the conflicting deps.
4491
4492                 Currently, explanations and suggested courses of action are generated
4493                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4494                 """
4495
4496                 if len(slot_nodes) != 2:
4497                         # Suggestions are only implemented for
4498                         # conflicts between two packages.
4499                         return None
4500
4501                 all_conflict_atoms = self._slot_conflict_parent_atoms
4502                 matched_node = None
4503                 matched_atoms = None
4504                 unmatched_node = None
4505                 for node in slot_nodes:
4506                         parent_atoms = self._parent_atoms.get(node)
4507                         if not parent_atoms:
4508                                 # Normally, there are always parent atoms. If there are
4509                                 # none then something unexpected is happening and there's
4510                                 # currently no suggestion for this case.
4511                                 return None
4512                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4513                         for parent_atom in conflict_atoms:
4514                                 parent, atom = parent_atom
4515                                 if not atom.use:
4516                                         # Suggestions are currently only implemented for cases
4517                                         # in which all conflict atoms have USE deps.
4518                                         return None
4519                         if conflict_atoms:
4520                                 if matched_node is not None:
4521                                         # If conflict atoms match multiple nodes
4522                                         # then there's no suggestion.
4523                                         return None
4524                                 matched_node = node
4525                                 matched_atoms = conflict_atoms
4526                         else:
4527                                 if unmatched_node is not None:
4528                                         # Neither node is matched by conflict atoms, and
4529                                         # there is no suggestion for this case.
4530                                         return None
4531                                 unmatched_node = node
4532
4533                 if matched_node is None or unmatched_node is None:
4534                         # This shouldn't happen.
4535                         return None
4536
4537                 if unmatched_node.installed and not matched_node.installed:
4538                         return "New USE are correctly set, but --newuse wasn't" + \
4539                                 " requested, so an installed package with incorrect USE " + \
4540                                 "happened to get pulled into the dependency graph. " + \
4541                                 "In order to solve " + \
4542                                 "this, either specify the --newuse option or explicitly " + \
4543                                 " reinstall '%s'." % matched_node.slot_atom
4544
4545                 if matched_node.installed and not unmatched_node.installed:
4546                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4547                         explanation = ("New USE for '%s' are incorrectly set. " + \
4548                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4549                                 (matched_node.slot_atom, atoms[0])
4550                         if len(atoms) > 1:
4551                                 for atom in atoms[1:-1]:
4552                                         explanation += ", '%s'" % (atom,)
4553                                 if len(atoms) > 2:
4554                                         explanation += ","
4555                                 explanation += " and '%s'" % (atoms[-1],)
4556                         explanation += "."
4557                         return explanation
4558
4559                 return None
4560
4561         def _process_slot_conflicts(self):
4562                 """
4563                 Process slot conflict data to identify specific atoms which
4564                 lead to conflict. These atoms only match a subset of the
4565                 packages that have been pulled into a given slot.
4566                 """
4567                 for (slot_atom, root), slot_nodes \
4568                         in self._slot_collision_info.iteritems():
4569
4570                         all_parent_atoms = set()
4571                         for pkg in slot_nodes:
4572                                 parent_atoms = self._parent_atoms.get(pkg)
4573                                 if not parent_atoms:
4574                                         continue
4575                                 all_parent_atoms.update(parent_atoms)
4576
4577                         for pkg in slot_nodes:
4578                                 parent_atoms = self._parent_atoms.get(pkg)
4579                                 if parent_atoms is None:
4580                                         parent_atoms = set()
4581                                         self._parent_atoms[pkg] = parent_atoms
4582                                 for parent_atom in all_parent_atoms:
4583                                         if parent_atom in parent_atoms:
4584                                                 continue
4585                                         # Use package set for matching since it will match via
4586                                         # PROVIDE when necessary, while match_from_list does not.
4587                                         parent, atom = parent_atom
4588                                         atom_set = InternalPackageSet(
4589                                                 initial_atoms=(atom,))
4590                                         if atom_set.findAtomForPackage(pkg):
4591                                                 parent_atoms.add(parent_atom)
4592                                         else:
4593                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4594
4595         def _reinstall_for_flags(self, forced_flags,
4596                 orig_use, orig_iuse, cur_use, cur_iuse):
4597                 """Return a set of flags that trigger reinstallation, or None if there
4598                 are no such flags."""
4599                 if "--newuse" in self.myopts:
4600                         flags = set(orig_iuse.symmetric_difference(
4601                                 cur_iuse).difference(forced_flags))
4602                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4603                                 cur_iuse.intersection(cur_use)))
4604                         if flags:
4605                                 return flags
4606                 elif "changed-use" == self.myopts.get("--reinstall"):
4607                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4608                                 cur_iuse.intersection(cur_use))
4609                         if flags:
4610                                 return flags
4611                 return None
4612
4613         def _create_graph(self, allow_unsatisfied=False):
4614                 dep_stack = self._dep_stack
4615                 while dep_stack:
4616                         self.spinner.update()
4617                         dep = dep_stack.pop()
4618                         if isinstance(dep, Package):
4619                                 if not self._add_pkg_deps(dep,
4620                                         allow_unsatisfied=allow_unsatisfied):
4621                                         return 0
4622                                 continue
4623                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4624                                 return 0
4625                 return 1
4626
4627         def _add_dep(self, dep, allow_unsatisfied=False):
4628                 debug = "--debug" in self.myopts
4629                 buildpkgonly = "--buildpkgonly" in self.myopts
4630                 nodeps = "--nodeps" in self.myopts
4631                 empty = "empty" in self.myparams
4632                 deep = "deep" in self.myparams
4633                 update = "--update" in self.myopts and dep.depth <= 1
4634                 if dep.blocker:
4635                         if not buildpkgonly and \
4636                                 not nodeps and \
4637                                 dep.parent not in self._slot_collision_nodes:
4638                                 if dep.parent.onlydeps:
4639                                         # It's safe to ignore blockers if the
4640                                         # parent is an --onlydeps node.
4641                                         return 1
4642                                 # The blocker applies to the root where
4643                                 # the parent is or will be installed.
4644                                 blocker = Blocker(atom=dep.atom,
4645                                         eapi=dep.parent.metadata["EAPI"],
4646                                         root=dep.parent.root)
4647                                 self._blocker_parents.add(blocker, dep.parent)
4648                         return 1
4649                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4650                         onlydeps=dep.onlydeps)
4651                 if not dep_pkg:
4652                         if allow_unsatisfied:
4653                                 self._unsatisfied_deps.append(dep)
4654                                 return 1
4655                         self._unsatisfied_deps_for_display.append(
4656                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4657                         return 0
4658                 # In some cases, dep_check will return deps that shouldn't
4659                 # be proccessed any further, so they are identified and
4660                 # discarded here. Try to discard as few as possible since
4661                 # discarded dependencies reduce the amount of information
4662                 # available for optimization of merge order.
4663                 if dep.priority.satisfied and \
4664                         not (existing_node or empty or deep or update):
4665                         myarg = None
4666                         if dep.root == self.target_root:
4667                                 try:
4668                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4669                                 except StopIteration:
4670                                         pass
4671                                 except portage.exception.InvalidDependString:
4672                                         if not dep_pkg.installed:
4673                                                 # This shouldn't happen since the package
4674                                                 # should have been masked.
4675                                                 raise
4676                         if not myarg:
4677                                 self._ignored_deps.append(dep)
4678                                 return 1
4679
4680                 if not self._add_pkg(dep_pkg, dep):
4681                         return 0
4682                 return 1
4683
4684         def _add_pkg(self, pkg, dep):
4685                 myparent = None
4686                 priority = None
4687                 depth = 0
4688                 if dep is None:
4689                         dep = Dependency()
4690                 else:
4691                         myparent = dep.parent
4692                         priority = dep.priority
4693                         depth = dep.depth
4694                 if priority is None:
4695                         priority = DepPriority()
4696                 """
4697                 Fills the digraph with nodes comprised of packages to merge.
4698                 mybigkey is the package spec of the package to merge.
4699                 myparent is the package depending on mybigkey ( or None )
4700                 addme = Should we add this package to the digraph or are we just looking at it's deps?
4701                         Think --onlydeps, we need to ignore packages in that case.
4702                 #stuff to add:
4703                 #SLOT-aware emerge
4704                 #IUSE-aware emerge -> USE DEP aware depgraph
4705                 #"no downgrade" emerge
4706                 """
4707                 # Ensure that the dependencies of the same package
4708                 # are never processed more than once.
4709                 previously_added = pkg in self.digraph
4710
4711                 # select the correct /var database that we'll be checking against
4712                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4713                 pkgsettings = self.pkgsettings[pkg.root]
4714
4715                 arg_atoms = None
4716                 if True:
4717                         try:
4718                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4719                         except portage.exception.InvalidDependString, e:
4720                                 if not pkg.installed:
4721                                         show_invalid_depstring_notice(
4722                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4723                                         return 0
4724                                 del e
4725
4726                 if not pkg.onlydeps:
4727                         if not pkg.installed and \
4728                                 "empty" not in self.myparams and \
4729                                 vardbapi.match(pkg.slot_atom):
4730                                 # Increase the priority of dependencies on packages that
4731                                 # are being rebuilt. This optimizes merge order so that
4732                                 # dependencies are rebuilt/updated as soon as possible,
4733                                 # which is needed especially when emerge is called by
4734                                 # revdep-rebuild since dependencies may be affected by ABI
4735                                 # breakage that has rendered them useless. Don't adjust
4736                                 # priority here when in "empty" mode since all packages
4737                                 # are being merged in that case.
4738                                 priority.rebuild = True
4739
4740                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4741                         slot_collision = False
4742                         if existing_node:
4743                                 existing_node_matches = pkg.cpv == existing_node.cpv
4744                                 if existing_node_matches and \
4745                                         pkg != existing_node and \
4746                                         dep.atom is not None:
4747                                         # Use package set for matching since it will match via
4748                                         # PROVIDE when necessary, while match_from_list does not.
4749                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4750                                         if not atom_set.findAtomForPackage(existing_node):
4751                                                 existing_node_matches = False
4752                                 if existing_node_matches:
4753                                         # The existing node can be reused.
4754                                         if arg_atoms:
4755                                                 for parent_atom in arg_atoms:
4756                                                         parent, atom = parent_atom
4757                                                         self.digraph.add(existing_node, parent,
4758                                                                 priority=priority)
4759                                                         self._add_parent_atom(existing_node, parent_atom)
4760                                         # If a direct circular dependency is not an unsatisfied
4761                                         # buildtime dependency then drop it here since otherwise
4762                                         # it can skew the merge order calculation in an unwanted
4763                                         # way.
4764                                         if existing_node != myparent or \
4765                                                 (priority.buildtime and not priority.satisfied):
4766                                                 self.digraph.addnode(existing_node, myparent,
4767                                                         priority=priority)
4768                                                 if dep.atom is not None and dep.parent is not None:
4769                                                         self._add_parent_atom(existing_node,
4770                                                                 (dep.parent, dep.atom))
4771                                         return 1
4772                                 else:
4773
4774                                         # A slot collision has occurred.  Sometimes this coincides
4775                                         # with unresolvable blockers, so the slot collision will be
4776                                         # shown later if there are no unresolvable blockers.
4777                                         self._add_slot_conflict(pkg)
4778                                         slot_collision = True
4779
4780                         if slot_collision:
4781                                 # Now add this node to the graph so that self.display()
4782                                 # can show use flags and --tree portage.output.  This node is
4783                                 # only being partially added to the graph.  It must not be
4784                                 # allowed to interfere with the other nodes that have been
4785                                 # added.  Do not overwrite data for existing nodes in
4786                                 # self.mydbapi since that data will be used for blocker
4787                                 # validation.
4788                                 # Even though the graph is now invalid, continue to process
4789                                 # dependencies so that things like --fetchonly can still
4790                                 # function despite collisions.
4791                                 pass
4792                         else:
4793                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4794                                 self.mydbapi[pkg.root].cpv_inject(pkg)
4795
4796                         if not pkg.installed:
4797                                 # Allow this package to satisfy old-style virtuals in case it
4798                                 # doesn't already. Any pre-existing providers will be preferred
4799                                 # over this one.
4800                                 try:
4801                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
4802                                         # For consistency, also update the global virtuals.
4803                                         settings = self.roots[pkg.root].settings
4804                                         settings.unlock()
4805                                         settings.setinst(pkg.cpv, pkg.metadata)
4806                                         settings.lock()
4807                                 except portage.exception.InvalidDependString, e:
4808                                         show_invalid_depstring_notice(
4809                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4810                                         del e
4811                                         return 0
4812
4813                 if arg_atoms:
4814                         self._set_nodes.add(pkg)
4815
4816                 # Do this even when addme is False (--onlydeps) so that the
4817                 # parent/child relationship is always known in case
4818                 # self._show_slot_collision_notice() needs to be called later.
4819                 self.digraph.add(pkg, myparent, priority=priority)
4820                 if dep.atom is not None and dep.parent is not None:
4821                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
4822
4823                 if arg_atoms:
4824                         for parent_atom in arg_atoms:
4825                                 parent, atom = parent_atom
4826                                 self.digraph.add(pkg, parent, priority=priority)
4827                                 self._add_parent_atom(pkg, parent_atom)
4828
4829                 """ This section determines whether we go deeper into dependencies or not.
4830                     We want to go deeper on a few occasions:
4831                     Installing package A, we need to make sure package A's deps are met.
4832                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4833                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4834                 """
4835                 dep_stack = self._dep_stack
4836                 if "recurse" not in self.myparams:
4837                         return 1
4838                 elif pkg.installed and \
4839                         "deep" not in self.myparams:
4840                         dep_stack = self._ignored_deps
4841
4842                 self.spinner.update()
4843
4844                 if arg_atoms:
4845                         depth = 0
4846                 pkg.depth = depth
4847                 if not previously_added:
4848                         dep_stack.append(pkg)
4849                 return 1
4850
4851         def _add_parent_atom(self, pkg, parent_atom):
4852                 parent_atoms = self._parent_atoms.get(pkg)
4853                 if parent_atoms is None:
4854                         parent_atoms = set()
4855                         self._parent_atoms[pkg] = parent_atoms
4856                 parent_atoms.add(parent_atom)
4857
4858         def _add_slot_conflict(self, pkg):
4859                 self._slot_collision_nodes.add(pkg)
4860                 slot_key = (pkg.slot_atom, pkg.root)
4861                 slot_nodes = self._slot_collision_info.get(slot_key)
4862                 if slot_nodes is None:
4863                         slot_nodes = set()
4864                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
4865                         self._slot_collision_info[slot_key] = slot_nodes
4866                 slot_nodes.add(pkg)
4867
4868         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4869
4870                 mytype = pkg.type_name
4871                 myroot = pkg.root
4872                 mykey = pkg.cpv
4873                 metadata = pkg.metadata
4874                 myuse = pkg.use.enabled
4875                 jbigkey = pkg
4876                 depth = pkg.depth + 1
4877                 removal_action = "remove" in self.myparams
4878
4879                 edepend={}
4880                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
4881                 for k in depkeys:
4882                         edepend[k] = metadata[k]
4883
4884                 if not pkg.built and \
4885                         "--buildpkgonly" in self.myopts and \
4886                         "deep" not in self.myparams and \
4887                         "empty" not in self.myparams:
4888                         edepend["RDEPEND"] = ""
4889                         edepend["PDEPEND"] = ""
4890                 bdeps_satisfied = False
4891                 
4892                 if pkg.built and not removal_action:
4893                         if self.myopts.get("--with-bdeps", "n") == "y":
4894                                 # Pull in build time deps as requested, but marked them as
4895                                 # "satisfied" since they are not strictly required. This allows
4896                                 # more freedom in the merge order calculation for solving
4897                                 # circular dependencies. Don't convert to PDEPEND since that
4898                                 # could make --with-bdeps=y less effective if it is used to
4899                                 # adjust merge order to prevent built_with_use() calls from
4900                                 # failing.
4901                                 bdeps_satisfied = True
4902                         else:
4903                                 # built packages do not have build time dependencies.
4904                                 edepend["DEPEND"] = ""
4905
4906                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4907                         edepend["DEPEND"] = ""
4908
4909                 deps = (
4910                         ("/", edepend["DEPEND"],
4911                                 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4912                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4913                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
4914                 )
4915
4916                 debug = "--debug" in self.myopts
4917                 strict = mytype != "installed"
4918                 try:
4919                         for dep_root, dep_string, dep_priority in deps:
4920                                 if pkg.onlydeps:
4921                                         # Decrease priority so that --buildpkgonly
4922                                         # hasallzeros() works correctly.
4923                                         dep_priority = DepPriority()
4924                                 if not dep_string:
4925                                         continue
4926                                 if debug:
4927                                         print
4928                                         print "Parent:   ", jbigkey
4929                                         print "Depstring:", dep_string
4930                                         print "Priority:", dep_priority
4931                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
4932                                 try:
4933                                         selected_atoms = self._select_atoms(dep_root,
4934                                                 dep_string, myuse=myuse, parent=pkg, strict=strict)
4935                                 except portage.exception.InvalidDependString, e:
4936                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4937                                         return 0
4938                                 if debug:
4939                                         print "Candidates:", selected_atoms
4940
4941                                 for atom in selected_atoms:
4942                                         try:
4943
4944                                                 atom = portage.dep.Atom(atom)
4945
4946                                                 mypriority = dep_priority.copy()
4947                                                 if not atom.blocker and vardb.match(atom):
4948                                                         mypriority.satisfied = True
4949
4950                                                 if not self._add_dep(Dependency(atom=atom,
4951                                                         blocker=atom.blocker, depth=depth, parent=pkg,
4952                                                         priority=mypriority, root=dep_root),
4953                                                         allow_unsatisfied=allow_unsatisfied):
4954                                                         return 0
4955
4956                                         except portage.exception.InvalidAtom, e:
4957                                                 show_invalid_depstring_notice(
4958                                                         pkg, dep_string, str(e))
4959                                                 del e
4960                                                 if not pkg.installed:
4961                                                         return 0
4962
4963                                 if debug:
4964                                         print "Exiting...", jbigkey
4965                 except portage.exception.AmbiguousPackageName, e:
4966                         pkgs = e.args[0]
4967                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
4968                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4969                         for cpv in pkgs:
4970                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
4971                         portage.writemsg("\n", noiselevel=-1)
4972                         if mytype == "binary":
4973                                 portage.writemsg(
4974                                         "!!! This binary package cannot be installed: '%s'\n" % \
4975                                         mykey, noiselevel=-1)
4976                         elif mytype == "ebuild":
4977                                 portdb = self.roots[myroot].trees["porttree"].dbapi
4978                                 myebuild, mylocation = portdb.findname2(mykey)
4979                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
4980                                         "'%s'\n" % myebuild, noiselevel=-1)
4981                         portage.writemsg("!!! Please notify the package maintainer " + \
4982                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
4983                         return 0
4984                 return 1
4985
4986         def _priority(self, **kwargs):
4987                 if "remove" in self.myparams:
4988                         priority_constructor = UnmergeDepPriority
4989                 else:
4990                         priority_constructor = DepPriority
4991                 return priority_constructor(**kwargs)
4992
4993         def _dep_expand(self, root_config, atom_without_category):
4994                 """
4995                 @param root_config: a root config instance
4996                 @type root_config: RootConfig
4997                 @param atom_without_category: an atom without a category component
4998                 @type atom_without_category: String
4999                 @rtype: list
5000                 @returns: a list of atoms containing categories (possibly empty)
5001                 """
5002                 null_cp = portage.dep_getkey(insert_category_into_atom(
5003                         atom_without_category, "null"))
5004                 cat, atom_pn = portage.catsplit(null_cp)
5005
5006                 cp_set = set()
5007                 for db, pkg_type, built, installed, db_keys in \
5008                         self._filtered_trees[root_config.root]["dbs"]:
5009                         cp_set.update(db.cp_all())
5010                 for cp in list(cp_set):
5011                         cat, pn = portage.catsplit(cp)
5012                         if pn != atom_pn:
5013                                 cp_set.discard(cp)
5014                 deps = []
5015                 for cp in cp_set:
5016                         cat, pn = portage.catsplit(cp)
5017                         deps.append(insert_category_into_atom(
5018                                 atom_without_category, cat))
5019                 return deps
5020
5021         def _have_new_virt(self, root, atom_cp):
5022                 ret = False
5023                 for db, pkg_type, built, installed, db_keys in \
5024                         self._filtered_trees[root]["dbs"]:
5025                         if db.cp_list(atom_cp):
5026                                 ret = True
5027                                 break
5028                 return ret
5029
5030         def _iter_atoms_for_pkg(self, pkg):
5031                 # TODO: add multiple $ROOT support
5032                 if pkg.root != self.target_root:
5033                         return
5034                 atom_arg_map = self._atom_arg_map
5035                 root_config = self.roots[pkg.root]
5036                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5037                         atom_cp = portage.dep_getkey(atom)
5038                         if atom_cp != pkg.cp and \
5039                                 self._have_new_virt(pkg.root, atom_cp):
5040                                 continue
5041                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5042                         visible_pkgs.reverse() # descending order
5043                         higher_slot = None
5044                         for visible_pkg in visible_pkgs:
5045                                 if visible_pkg.cp != atom_cp:
5046                                         continue
5047                                 if pkg >= visible_pkg:
5048                                         # This is descending order, and we're not
5049                                         # interested in any versions <= pkg given.
5050                                         break
5051                                 if pkg.slot_atom != visible_pkg.slot_atom:
5052                                         higher_slot = visible_pkg
5053                                         break
5054                         if higher_slot is not None:
5055                                 continue
5056                         for arg in atom_arg_map[(atom, pkg.root)]:
5057                                 if isinstance(arg, PackageArg) and \
5058                                         arg.package != pkg:
5059                                         continue
5060                                 yield arg, atom
5061
5062         def select_files(self, myfiles):
5063                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5064                 appropriate depgraph and return a favorite list."""
5065                 debug = "--debug" in self.myopts
5066                 root_config = self.roots[self.target_root]
5067                 sets = root_config.sets
5068                 getSetAtoms = root_config.setconfig.getSetAtoms
5069                 myfavorites=[]
5070                 myroot = self.target_root
5071                 dbs = self._filtered_trees[myroot]["dbs"]
5072                 vardb = self.trees[myroot]["vartree"].dbapi
5073                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5074                 portdb = self.trees[myroot]["porttree"].dbapi
5075                 bindb = self.trees[myroot]["bintree"].dbapi
5076                 pkgsettings = self.pkgsettings[myroot]
5077                 args = []
5078                 onlydeps = "--onlydeps" in self.myopts
5079                 lookup_owners = []
5080                 for x in myfiles:
5081                         ext = os.path.splitext(x)[1]
5082                         if ext==".tbz2":
5083                                 if not os.path.exists(x):
5084                                         if os.path.exists(
5085                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5086                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5087                                         elif os.path.exists(
5088                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5089                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5090                                         else:
5091                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5092                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5093                                                 return 0, myfavorites
5094                                 mytbz2=portage.xpak.tbz2(x)
5095                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5096                                 if os.path.realpath(x) != \
5097                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5098                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5099                                         return 0, myfavorites
5100                                 db_keys = list(bindb._aux_cache_keys)
5101                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5102                                 pkg = Package(type_name="binary", root_config=root_config,
5103                                         cpv=mykey, built=True, metadata=metadata,
5104                                         onlydeps=onlydeps)
5105                                 self._pkg_cache[pkg] = pkg
5106                                 args.append(PackageArg(arg=x, package=pkg,
5107                                         root_config=root_config))
5108                         elif ext==".ebuild":
5109                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5110                                 pkgdir = os.path.dirname(ebuild_path)
5111                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5112                                 cp = pkgdir[len(tree_root)+1:]
5113                                 e = portage.exception.PackageNotFound(
5114                                         ("%s is not in a valid portage tree " + \
5115                                         "hierarchy or does not exist") % x)
5116                                 if not portage.isvalidatom(cp):
5117                                         raise e
5118                                 cat = portage.catsplit(cp)[0]
5119                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5120                                 if not portage.isvalidatom("="+mykey):
5121                                         raise e
5122                                 ebuild_path = portdb.findname(mykey)
5123                                 if ebuild_path:
5124                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5125                                                 cp, os.path.basename(ebuild_path)):
5126                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5127                                                 return 0, myfavorites
5128                                         if mykey not in portdb.xmatch(
5129                                                 "match-visible", portage.dep_getkey(mykey)):
5130                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5131                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5132                                                 print colorize("BAD", "*** page for details.")
5133                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5134                                                         "Continuing...")
5135                                 else:
5136                                         raise portage.exception.PackageNotFound(
5137                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5138                                 db_keys = list(portdb._aux_cache_keys)
5139                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5140                                 pkg = Package(type_name="ebuild", root_config=root_config,
5141                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5142                                 pkgsettings.setcpv(pkg)
5143                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5144                                 self._pkg_cache[pkg] = pkg
5145                                 args.append(PackageArg(arg=x, package=pkg,
5146                                         root_config=root_config))
5147                         elif x.startswith(os.path.sep):
5148                                 if not x.startswith(myroot):
5149                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5150                                                 " $ROOT.\n") % x, noiselevel=-1)
5151                                         return 0, []
5152                                 # Queue these up since it's most efficient to handle
5153                                 # multiple files in a single iter_owners() call.
5154                                 lookup_owners.append(x)
5155                         else:
5156                                 if x in ("system", "world"):
5157                                         x = SETPREFIX + x
5158                                 if x.startswith(SETPREFIX):
5159                                         s = x[len(SETPREFIX):]
5160                                         if s not in sets:
5161                                                 raise portage.exception.PackageSetNotFound(s)
5162                                         if s in self._sets:
5163                                                 continue
5164                                         # Recursively expand sets so that containment tests in
5165                                         # self._get_parent_sets() properly match atoms in nested
5166                                         # sets (like if world contains system).
5167                                         expanded_set = InternalPackageSet(
5168                                                 initial_atoms=getSetAtoms(s))
5169                                         self._sets[s] = expanded_set
5170                                         args.append(SetArg(arg=x, set=expanded_set,
5171                                                 root_config=root_config))
5172                                         myfavorites.append(x)
5173                                         continue
5174                                 if not is_valid_package_atom(x):
5175                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5176                                                 noiselevel=-1)
5177                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5178                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5179                                         return (0,[])
5180                                 # Don't expand categories or old-style virtuals here unless
5181                                 # necessary. Expansion of old-style virtuals here causes at
5182                                 # least the following problems:
5183                                 #   1) It's more difficult to determine which set(s) an atom
5184                                 #      came from, if any.
5185                                 #   2) It takes away freedom from the resolver to choose other
5186                                 #      possible expansions when necessary.
5187                                 if "/" in x:
5188                                         args.append(AtomArg(arg=x, atom=x,
5189                                                 root_config=root_config))
5190                                         continue
5191                                 expanded_atoms = self._dep_expand(root_config, x)
5192                                 installed_cp_set = set()
5193                                 for atom in expanded_atoms:
5194                                         atom_cp = portage.dep_getkey(atom)
5195                                         if vardb.cp_list(atom_cp):
5196                                                 installed_cp_set.add(atom_cp)
5197                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5198                                         installed_cp = iter(installed_cp_set).next()
5199                                         expanded_atoms = [atom for atom in expanded_atoms \
5200                                                 if portage.dep_getkey(atom) == installed_cp]
5201
5202                                 if len(expanded_atoms) > 1:
5203                                         print
5204                                         print
5205                                         ambiguous_package_name(x, expanded_atoms, root_config,
5206                                                 self.spinner, self.myopts)
5207                                         return False, myfavorites
5208                                 if expanded_atoms:
5209                                         atom = expanded_atoms[0]
5210                                 else:
5211                                         null_atom = insert_category_into_atom(x, "null")
5212                                         null_cp = portage.dep_getkey(null_atom)
5213                                         cat, atom_pn = portage.catsplit(null_cp)
5214                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5215                                         if virts_p:
5216                                                 # Allow the depgraph to choose which virtual.
5217                                                 atom = insert_category_into_atom(x, "virtual")
5218                                         else:
5219                                                 atom = insert_category_into_atom(x, "null")
5220
5221                                 args.append(AtomArg(arg=x, atom=atom,
5222                                         root_config=root_config))
5223
5224                 if lookup_owners:
5225                         relative_paths = []
5226                         search_for_multiple = False
5227                         if len(lookup_owners) > 1:
5228                                 search_for_multiple = True
5229
5230                         for x in lookup_owners:
5231                                 if not search_for_multiple and os.path.isdir(x):
5232                                         search_for_multiple = True
5233                                 relative_paths.append(x[len(myroot):])
5234
5235                         owners = set()
5236                         for pkg, relative_path in \
5237                                 real_vardb._owners.iter_owners(relative_paths):
5238                                 owners.add(pkg.mycpv)
5239                                 if not search_for_multiple:
5240                                         break
5241
5242                         if not owners:
5243                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5244                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5245                                 return 0, []
5246
5247                         for cpv in owners:
5248                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5249                                 if not slot:
5250                                         # portage now masks packages with missing slot, but it's
5251                                         # possible that one was installed by an older version
5252                                         atom = portage.cpv_getkey(cpv)
5253                                 else:
5254                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5255                                 args.append(AtomArg(arg=atom, atom=atom,
5256                                         root_config=root_config))
5257
5258                 if "--update" in self.myopts:
5259                         # Enable greedy SLOT atoms for atoms given as arguments.
5260                         # This is currently disabled for sets since greedy SLOT
5261                         # atoms could be a property of the set itself.
5262                         greedy_atoms = []
5263                         for arg in args:
5264                                 # In addition to any installed slots, also try to pull
5265                                 # in the latest new slot that may be available.
5266                                 greedy_atoms.append(arg)
5267                                 if not isinstance(arg, (AtomArg, PackageArg)):
5268                                         continue
5269                                 atom_cp = portage.dep_getkey(arg.atom)
5270                                 slots = set()
5271                                 for cpv in vardb.match(arg.atom):
5272                                         slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5273                                 for slot in slots:
5274                                         greedy_atoms.append(
5275                                                 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5276                                                         root_config=root_config))
5277                         args = greedy_atoms
5278                         del greedy_atoms
5279
5280                 # Create the "args" package set from atoms and
5281                 # packages given as arguments.
5282                 args_set = self._sets["args"]
5283                 for arg in args:
5284                         if not isinstance(arg, (AtomArg, PackageArg)):
5285                                 continue
5286                         myatom = arg.atom
5287                         if myatom in args_set:
5288                                 continue
5289                         args_set.add(myatom)
5290                         myfavorites.append(myatom)
5291                 self._set_atoms.update(chain(*self._sets.itervalues()))
5292                 atom_arg_map = self._atom_arg_map
5293                 for arg in args:
5294                         for atom in arg.set:
5295                                 atom_key = (atom, myroot)
5296                                 refs = atom_arg_map.get(atom_key)
5297                                 if refs is None:
5298                                         refs = []
5299                                         atom_arg_map[atom_key] = refs
5300                                         if arg not in refs:
5301                                                 refs.append(arg)
5302                 pprovideddict = pkgsettings.pprovideddict
5303                 if debug:
5304                         portage.writemsg("\n", noiselevel=-1)
5305                 # Order needs to be preserved since a feature of --nodeps
5306                 # is to allow the user to force a specific merge order.
5307                 args.reverse()
5308                 while args:
5309                         arg = args.pop()
5310                         for atom in arg.set:
5311                                 self.spinner.update()
5312                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5313                                         root=myroot, parent=arg)
5314                                 atom_cp = portage.dep_getkey(atom)
5315                                 try:
5316                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5317                                         if pprovided and portage.match_from_list(atom, pprovided):
5318                                                 # A provided package has been specified on the command line.
5319                                                 self._pprovided_args.append((arg, atom))
5320                                                 continue
5321                                         if isinstance(arg, PackageArg):
5322                                                 if not self._add_pkg(arg.package, dep) or \
5323                                                         not self._create_graph():
5324                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5325                                                                 "dependencies for %s\n") % arg.arg)
5326                                                         return 0, myfavorites
5327                                                 continue
5328                                         if debug:
5329                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5330                                                         (arg, atom), noiselevel=-1)
5331                                         pkg, existing_node = self._select_package(
5332                                                 myroot, atom, onlydeps=onlydeps)
5333                                         if not pkg:
5334                                                 if not (isinstance(arg, SetArg) and \
5335                                                         arg.name in ("system", "world")):
5336                                                         self._unsatisfied_deps_for_display.append(
5337                                                                 ((myroot, atom), {}))
5338                                                         return 0, myfavorites
5339                                                 self._missing_args.append((arg, atom))
5340                                                 continue
5341                                         if atom_cp != pkg.cp:
5342                                                 # For old-style virtuals, we need to repeat the
5343                                                 # package.provided check against the selected package.
5344                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5345                                                 pprovided = pprovideddict.get(pkg.cp)
5346                                                 if pprovided and \
5347                                                         portage.match_from_list(expanded_atom, pprovided):
5348                                                         # A provided package has been
5349                                                         # specified on the command line.
5350                                                         self._pprovided_args.append((arg, atom))
5351                                                         continue
5352                                         if pkg.installed and "selective" not in self.myparams:
5353                                                 self._unsatisfied_deps_for_display.append(
5354                                                         ((myroot, atom), {}))
5355                                                 # Previous behavior was to bail out in this case, but
5356                                                 # since the dep is satisfied by the installed package,
5357                                                 # it's more friendly to continue building the graph
5358                                                 # and just show a warning message. Therefore, only bail
5359                                                 # out here if the atom is not from either the system or
5360                                                 # world set.
5361                                                 if not (isinstance(arg, SetArg) and \
5362                                                         arg.name in ("system", "world")):
5363                                                         return 0, myfavorites
5364
5365                                         # Add the selected package to the graph as soon as possible
5366                                         # so that later dep_check() calls can use it as feedback
5367                                         # for making more consistent atom selections.
5368                                         if not self._add_pkg(pkg, dep):
5369                                                 if isinstance(arg, SetArg):
5370                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5371                                                                 "dependencies for %s from %s\n") % \
5372                                                                 (atom, arg.arg))
5373                                                 else:
5374                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5375                                                                 "dependencies for %s\n") % atom)
5376                                                 return 0, myfavorites
5377
5378                                 except portage.exception.MissingSignature, e:
5379                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5380                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5381                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5382                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5383                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5384                                         return 0, myfavorites
5385                                 except portage.exception.InvalidSignature, e:
5386                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5387                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5388                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5389                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5390                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5391                                         return 0, myfavorites
5392                                 except SystemExit, e:
5393                                         raise # Needed else can't exit
5394                                 except Exception, e:
5395                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5396                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5397                                         raise
5398
5399                 # Now that the root packages have been added to the graph,
5400                 # process the dependencies.
5401                 if not self._create_graph():
5402                         return 0, myfavorites
5403
5404                 missing=0
5405                 if "--usepkgonly" in self.myopts:
5406                         for xs in self.digraph.all_nodes():
5407                                 if not isinstance(xs, Package):
5408                                         continue
5409                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5410                                         if missing == 0:
5411                                                 print
5412                                         missing += 1
5413                                         print "Missing binary for:",xs[2]
5414
5415                 try:
5416                         self.altlist()
5417                 except self._unknown_internal_error:
5418                         return False, myfavorites
5419
5420                 # We're true here unless we are missing binaries.
5421                 return (not missing,myfavorites)
5422
5423         def _select_atoms_from_graph(self, *pargs, **kwargs):
5424                 """
5425                 Prefer atoms matching packages that have already been
5426                 added to the graph or those that are installed and have
5427                 not been scheduled for replacement.
5428                 """
5429                 kwargs["trees"] = self._graph_trees
5430                 return self._select_atoms_highest_available(*pargs, **kwargs)
5431
5432         def _select_atoms_highest_available(self, root, depstring,
5433                 myuse=None, parent=None, strict=True, trees=None):
5434                 """This will raise InvalidDependString if necessary. If trees is
5435                 None then self._filtered_trees is used."""
5436                 pkgsettings = self.pkgsettings[root]
5437                 if trees is None:
5438                         trees = self._filtered_trees
5439                 if True:
5440                         try:
5441                                 if parent is not None:
5442                                         trees[root]["parent"] = parent
5443                                 if not strict:
5444                                         portage.dep._dep_check_strict = False
5445                                 mycheck = portage.dep_check(depstring, None,
5446                                         pkgsettings, myuse=myuse,
5447                                         myroot=root, trees=trees)
5448                         finally:
5449                                 if parent is not None:
5450                                         trees[root].pop("parent")
5451                                 portage.dep._dep_check_strict = True
5452                         if not mycheck[0]:
5453                                 raise portage.exception.InvalidDependString(mycheck[1])
5454                         selected_atoms = mycheck[1]
5455                 return selected_atoms
5456
5457         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5458                 atom = portage.dep.Atom(atom)
5459                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5460                 atom_without_use = atom
5461                 if atom.use:
5462                         atom_without_use = portage.dep.remove_slot(atom)
5463                         if atom.slot:
5464                                 atom_without_use += ":" + atom.slot
5465                         atom_without_use = portage.dep.Atom(atom_without_use)
5466                 xinfo = '"%s"' % atom
5467                 if arg:
5468                         xinfo='"%s"' % arg
5469                 # Discard null/ from failed cpv_expand category expansion.
5470                 xinfo = xinfo.replace("null/", "")
5471                 masked_packages = []
5472                 missing_use = []
5473                 missing_licenses = []
5474                 have_eapi_mask = False
5475                 pkgsettings = self.pkgsettings[root]
5476                 implicit_iuse = pkgsettings._get_implicit_iuse()
5477                 root_config = self.roots[root]
5478                 portdb = self.roots[root].trees["porttree"].dbapi
5479                 dbs = self._filtered_trees[root]["dbs"]
5480                 for db, pkg_type, built, installed, db_keys in dbs:
5481                         if installed:
5482                                 continue
5483                         match = db.match
5484                         if hasattr(db, "xmatch"):
5485                                 cpv_list = db.xmatch("match-all", atom_without_use)
5486                         else:
5487                                 cpv_list = db.match(atom_without_use)
5488                         # descending order
5489                         cpv_list.reverse()
5490                         for cpv in cpv_list:
5491                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5492                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5493                                 if metadata is not None:
5494                                         pkg = Package(built=built, cpv=cpv,
5495                                                 installed=installed, metadata=metadata,
5496                                                 root_config=root_config)
5497                                         if pkg.cp != atom.cp:
5498                                                 # A cpv can be returned from dbapi.match() as an
5499                                                 # old-style virtual match even in cases when the
5500                                                 # package does not actually PROVIDE the virtual.
5501                                                 # Filter out any such false matches here.
5502                                                 if not atom_set.findAtomForPackage(pkg):
5503                                                         continue
5504                                         if atom.use and not mreasons:
5505                                                 missing_use.append(pkg)
5506                                                 continue
5507                                 masked_packages.append(
5508                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5509
5510                 missing_use_reasons = []
5511                 missing_iuse_reasons = []
5512                 for pkg in missing_use:
5513                         use = pkg.use.enabled
5514                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5515                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5516                         missing_iuse = []
5517                         for x in atom.use.required:
5518                                 if iuse_re.match(x) is None:
5519                                         missing_iuse.append(x)
5520                         mreasons = []
5521                         if missing_iuse:
5522                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5523                                 missing_iuse_reasons.append((pkg, mreasons))
5524                         else:
5525                                 need_enable = sorted(atom.use.enabled.difference(use))
5526                                 need_disable = sorted(atom.use.disabled.intersection(use))
5527                                 if need_enable or need_disable:
5528                                         changes = []
5529                                         changes.extend(colorize("red", "+" + x) \
5530                                                 for x in need_enable)
5531                                         changes.extend(colorize("blue", "-" + x) \
5532                                                 for x in need_disable)
5533                                         mreasons.append("Change USE: %s" % " ".join(changes))
5534                                         missing_use_reasons.append((pkg, mreasons))
5535
5536                 if missing_iuse_reasons and not missing_use_reasons:
5537                         missing_use_reasons = missing_iuse_reasons
5538                 elif missing_use_reasons:
5539                         # Only show the latest version.
5540                         del missing_use_reasons[1:]
5541
5542                 if missing_use_reasons:
5543                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5544                         print "!!! One of the following packages is required to complete your request:"
5545                         for pkg, mreasons in missing_use_reasons:
5546                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5547
5548                 elif masked_packages:
5549                         print "\n!!! " + \
5550                                 colorize("BAD", "All ebuilds that could satisfy ") + \
5551                                 colorize("INFORM", xinfo) + \
5552                                 colorize("BAD", " have been masked.")
5553                         print "!!! One of the following masked packages is required to complete your request:"
5554                         have_eapi_mask = show_masked_packages(masked_packages)
5555                         if have_eapi_mask:
5556                                 print
5557                                 msg = ("The current version of portage supports " + \
5558                                         "EAPI '%s'. You must upgrade to a newer version" + \
5559                                         " of portage before EAPI masked packages can" + \
5560                                         " be installed.") % portage.const.EAPI
5561                                 from textwrap import wrap
5562                                 for line in wrap(msg, 75):
5563                                         print line
5564                         print
5565                         show_mask_docs()
5566                 else:
5567                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5568
5569                 # Show parent nodes and the argument that pulled them in.
5570                 traversed_nodes = set()
5571                 node = myparent
5572                 msg = []
5573                 while node is not None:
5574                         traversed_nodes.add(node)
5575                         msg.append('(dependency required by "%s" [%s])' % \
5576                                 (colorize('INFORM', str(node.cpv)), node.type_name))
5577                         # When traversing to parents, prefer arguments over packages
5578                         # since arguments are root nodes. Never traverse the same
5579                         # package twice, in order to prevent an infinite loop.
5580                         selected_parent = None
5581                         for parent in self.digraph.parent_nodes(node):
5582                                 if isinstance(parent, DependencyArg):
5583                                         msg.append('(dependency required by "%s" [argument])' % \
5584                                                 (colorize('INFORM', str(parent))))
5585                                         selected_parent = None
5586                                         break
5587                                 if parent not in traversed_nodes:
5588                                         selected_parent = parent
5589                         node = selected_parent
5590                 for line in msg:
5591                         print line
5592
5593                 print
5594
5595         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5596                 cache_key = (root, atom, onlydeps)
5597                 ret = self._highest_pkg_cache.get(cache_key)
5598                 if ret is not None:
5599                         pkg, existing = ret
5600                         if pkg and not existing:
5601                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5602                                 if existing and existing == pkg:
5603                                         # Update the cache to reflect that the
5604                                         # package has been added to the graph.
5605                                         ret = pkg, pkg
5606                                         self._highest_pkg_cache[cache_key] = ret
5607                         return ret
5608                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5609                 self._highest_pkg_cache[cache_key] = ret
5610                 return ret
5611
5612         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5613                 root_config = self.roots[root]
5614                 pkgsettings = self.pkgsettings[root]
5615                 dbs = self._filtered_trees[root]["dbs"]
5616                 vardb = self.roots[root].trees["vartree"].dbapi
5617                 portdb = self.roots[root].trees["porttree"].dbapi
5618                 # List of acceptable packages, ordered by type preference.
5619                 matched_packages = []
5620                 highest_version = None
5621                 if not isinstance(atom, portage.dep.Atom):
5622                         atom = portage.dep.Atom(atom)
5623                 atom_cp = atom.cp
5624                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5625                 existing_node = None
5626                 myeb = None
5627                 usepkgonly = "--usepkgonly" in self.myopts
5628                 empty = "empty" in self.myparams
5629                 selective = "selective" in self.myparams
5630                 reinstall = False
5631                 noreplace = "--noreplace" in self.myopts
5632                 # Behavior of the "selective" parameter depends on
5633                 # whether or not a package matches an argument atom.
5634                 # If an installed package provides an old-style
5635                 # virtual that is no longer provided by an available
5636                 # package, the installed package may match an argument
5637                 # atom even though none of the available packages do.
5638                 # Therefore, "selective" logic does not consider
5639                 # whether or not an installed package matches an
5640                 # argument atom. It only considers whether or not
5641                 # available packages match argument atoms, which is
5642                 # represented by the found_available_arg flag.
5643                 found_available_arg = False
5644                 for find_existing_node in True, False:
5645                         if existing_node:
5646                                 break
5647                         for db, pkg_type, built, installed, db_keys in dbs:
5648                                 if existing_node:
5649                                         break
5650                                 if installed and not find_existing_node:
5651                                         want_reinstall = reinstall or empty or \
5652                                                 (found_available_arg and not selective)
5653                                         if want_reinstall and matched_packages:
5654                                                 continue
5655                                 if hasattr(db, "xmatch"):
5656                                         cpv_list = db.xmatch("match-all", atom)
5657                                 else:
5658                                         cpv_list = db.match(atom)
5659
5660                                 # USE=multislot can make an installed package appear as if
5661                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5662                                 # won't do any good as long as USE=multislot is enabled since
5663                                 # the newly built package still won't have the expected slot.
5664                                 # Therefore, assume that such SLOT dependencies are already
5665                                 # satisfied rather than forcing a rebuild.
5666                                 if installed and not cpv_list and atom.slot:
5667                                         for cpv in db.match(atom.cp):
5668                                                 slot_available = False
5669                                                 for other_db, other_type, other_built, \
5670                                                         other_installed, other_keys in dbs:
5671                                                         try:
5672                                                                 if atom.slot == \
5673                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
5674                                                                         slot_available = True
5675                                                                         break
5676                                                         except KeyError:
5677                                                                 pass
5678                                                 if not slot_available:
5679                                                         continue
5680                                                 inst_pkg = self._pkg(cpv, "installed",
5681                                                         root_config, installed=installed)
5682                                                 # Remove the slot from the atom and verify that
5683                                                 # the package matches the resulting atom.
5684                                                 atom_without_slot = portage.dep.remove_slot(atom)
5685                                                 if atom.use:
5686                                                         atom_without_slot += str(atom.use)
5687                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
5688                                                 if portage.match_from_list(
5689                                                         atom_without_slot, [inst_pkg]):
5690                                                         cpv_list = [inst_pkg.cpv]
5691                                                 break
5692
5693                                 if not cpv_list:
5694                                         continue
5695                                 pkg_status = "merge"
5696                                 if installed or onlydeps:
5697                                         pkg_status = "nomerge"
5698                                 # descending order
5699                                 cpv_list.reverse()
5700                                 for cpv in cpv_list:
5701                                         # Make --noreplace take precedence over --newuse.
5702                                         if not installed and noreplace and \
5703                                                 cpv in vardb.match(atom):
5704                                                 # If the installed version is masked, it may
5705                                                 # be necessary to look at lower versions,
5706                                                 # in case there is a visible downgrade.
5707                                                 continue
5708                                         reinstall_for_flags = None
5709                                         cache_key = (pkg_type, root, cpv, pkg_status)
5710                                         calculated_use = True
5711                                         pkg = self._pkg_cache.get(cache_key)
5712                                         if pkg is None:
5713                                                 calculated_use = False
5714                                                 try:
5715                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5716                                                 except KeyError:
5717                                                         continue
5718                                                 pkg = Package(built=built, cpv=cpv,
5719                                                         installed=installed, metadata=metadata,
5720                                                         onlydeps=onlydeps, root_config=root_config,
5721                                                         type_name=pkg_type)
5722                                                 metadata = pkg.metadata
5723                                                 if not built and ("?" in metadata["LICENSE"] or \
5724                                                         "?" in metadata["PROVIDE"]):
5725                                                         # This is avoided whenever possible because
5726                                                         # it's expensive. It only needs to be done here
5727                                                         # if it has an effect on visibility.
5728                                                         pkgsettings.setcpv(pkg)
5729                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
5730                                                         calculated_use = True
5731                                                 self._pkg_cache[pkg] = pkg
5732
5733                                         if not installed or (installed and matched_packages):
5734                                                 # Only enforce visibility on installed packages
5735                                                 # if there is at least one other visible package
5736                                                 # available. By filtering installed masked packages
5737                                                 # here, packages that have been masked since they
5738                                                 # were installed can be automatically downgraded
5739                                                 # to an unmasked version.
5740                                                 try:
5741                                                         if not visible(pkgsettings, pkg):
5742                                                                 continue
5743                                                 except portage.exception.InvalidDependString:
5744                                                         if not installed:
5745                                                                 continue
5746
5747                                                 # Enable upgrade or downgrade to a version
5748                                                 # with visible KEYWORDS when the installed
5749                                                 # version is masked by KEYWORDS, but never
5750                                                 # reinstall the same exact version only due
5751                                                 # to a KEYWORDS mask.
5752                                                 if installed and matched_packages and \
5753                                                         pkgsettings._getMissingKeywords(
5754                                                         pkg.cpv, pkg.metadata):
5755                                                         different_version = None
5756                                                         for avail_pkg in matched_packages:
5757                                                                 if not portage.dep.cpvequal(
5758                                                                         pkg.cpv, avail_pkg.cpv):
5759                                                                         different_version = avail_pkg
5760                                                                         break
5761                                                         if different_version is not None:
5762                                                                 # Only reinstall for KEYWORDS if
5763                                                                 # it's not the same version.
5764                                                                 continue
5765
5766                                         if not pkg.built and not calculated_use:
5767                                                 # This is avoided whenever possible because
5768                                                 # it's expensive.
5769                                                 pkgsettings.setcpv(pkg)
5770                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5771
5772                                         if pkg.cp != atom.cp:
5773                                                 # A cpv can be returned from dbapi.match() as an
5774                                                 # old-style virtual match even in cases when the
5775                                                 # package does not actually PROVIDE the virtual.
5776                                                 # Filter out any such false matches here.
5777                                                 if not atom_set.findAtomForPackage(pkg):
5778                                                         continue
5779
5780                                         myarg = None
5781                                         if root == self.target_root:
5782                                                 try:
5783                                                         # Ebuild USE must have been calculated prior
5784                                                         # to this point, in case atoms have USE deps.
5785                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
5786                                                 except StopIteration:
5787                                                         pass
5788                                                 except portage.exception.InvalidDependString:
5789                                                         if not installed:
5790                                                                 # masked by corruption
5791                                                                 continue
5792                                         if not installed and myarg:
5793                                                 found_available_arg = True
5794
5795                                         if atom.use and not pkg.built:
5796                                                 use = pkg.use.enabled
5797                                                 if atom.use.enabled.difference(use):
5798                                                         continue
5799                                                 if atom.use.disabled.intersection(use):
5800                                                         continue
5801                                         if pkg.cp == atom_cp:
5802                                                 if highest_version is None:
5803                                                         highest_version = pkg
5804                                                 elif pkg > highest_version:
5805                                                         highest_version = pkg
5806                                         # At this point, we've found the highest visible
5807                                         # match from the current repo. Any lower versions
5808                                         # from this repo are ignored, so this so the loop
5809                                         # will always end with a break statement below
5810                                         # this point.
5811                                         if find_existing_node:
5812                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5813                                                 if not e_pkg:
5814                                                         break
5815                                                 if portage.dep.match_from_list(atom, [e_pkg]):
5816                                                         if highest_version and \
5817                                                                 e_pkg.cp == atom_cp and \
5818                                                                 e_pkg < highest_version and \
5819                                                                 e_pkg.slot_atom != highest_version.slot_atom:
5820                                                                 # There is a higher version available in a
5821                                                                 # different slot, so this existing node is
5822                                                                 # irrelevant.
5823                                                                 pass
5824                                                         else:
5825                                                                 matched_packages.append(e_pkg)
5826                                                                 existing_node = e_pkg
5827                                                 break
5828                                         # Compare built package to current config and
5829                                         # reject the built package if necessary.
5830                                         if built and not installed and \
5831                                                 ("--newuse" in self.myopts or \
5832                                                 "--reinstall" in self.myopts):
5833                                                 iuses = pkg.iuse.all
5834                                                 old_use = pkg.use.enabled
5835                                                 if myeb:
5836                                                         pkgsettings.setcpv(myeb)
5837                                                 else:
5838                                                         pkgsettings.setcpv(pkg)
5839                                                 now_use = pkgsettings["PORTAGE_USE"].split()
5840                                                 forced_flags = set()
5841                                                 forced_flags.update(pkgsettings.useforce)
5842                                                 forced_flags.update(pkgsettings.usemask)
5843                                                 cur_iuse = iuses
5844                                                 if myeb and not usepkgonly:
5845                                                         cur_iuse = myeb.iuse.all
5846                                                 if self._reinstall_for_flags(forced_flags,
5847                                                         old_use, iuses,
5848                                                         now_use, cur_iuse):
5849                                                         break
5850                                         # Compare current config to installed package
5851                                         # and do not reinstall if possible.
5852                                         if not installed and \
5853                                                 ("--newuse" in self.myopts or \
5854                                                 "--reinstall" in self.myopts) and \
5855                                                 cpv in vardb.match(atom):
5856                                                 pkgsettings.setcpv(pkg)
5857                                                 forced_flags = set()
5858                                                 forced_flags.update(pkgsettings.useforce)
5859                                                 forced_flags.update(pkgsettings.usemask)
5860                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5861                                                 old_iuse = set(filter_iuse_defaults(
5862                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
5863                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
5864                                                 cur_iuse = pkg.iuse.all
5865                                                 reinstall_for_flags = \
5866                                                         self._reinstall_for_flags(
5867                                                         forced_flags, old_use, old_iuse,
5868                                                         cur_use, cur_iuse)
5869                                                 if reinstall_for_flags:
5870                                                         reinstall = True
5871                                         if not built:
5872                                                 myeb = pkg
5873                                         matched_packages.append(pkg)
5874                                         if reinstall_for_flags:
5875                                                 self._reinstall_nodes[pkg] = \
5876                                                         reinstall_for_flags
5877                                         break
5878
5879                 if not matched_packages:
5880                         return None, None
5881
5882                 if "--debug" in self.myopts:
5883                         for pkg in matched_packages:
5884                                 portage.writemsg("%s %s\n" % \
5885                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5886
5887                 # Filter out any old-style virtual matches if they are
5888                 # mixed with new-style virtual matches.
5889                 cp = portage.dep_getkey(atom)
5890                 if len(matched_packages) > 1 and \
5891                         "virtual" == portage.catsplit(cp)[0]:
5892                         for pkg in matched_packages:
5893                                 if pkg.cp != cp:
5894                                         continue
5895                                 # Got a new-style virtual, so filter
5896                                 # out any old-style virtuals.
5897                                 matched_packages = [pkg for pkg in matched_packages \
5898                                         if pkg.cp == cp]
5899                                 break
5900
5901                 # If the installed version is in a different slot and it is higher than
5902                 # the highest available visible package, _iter_atoms_for_pkg() may fail
5903                 # to properly match the available package with a corresponding argument
5904                 # atom. Detect this case and correct it here.
5905                 if not selective and len(matched_packages) > 1 and \
5906                         matched_packages[-1].installed and \
5907                         matched_packages[-1].slot_atom != \
5908                         matched_packages[-2].slot_atom and \
5909                         matched_packages[-1] > matched_packages[-2]:
5910                         pkg = matched_packages[-2]
5911                         if pkg.root == self.target_root and \
5912                                 self._set_atoms.findAtomForPackage(pkg):
5913                                 # Select the available package instead
5914                                 # of the installed package.
5915                                 matched_packages.pop()
5916
5917                 if len(matched_packages) > 1:
5918                         bestmatch = portage.best(
5919                                 [pkg.cpv for pkg in matched_packages])
5920                         matched_packages = [pkg for pkg in matched_packages \
5921                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5922
5923                 # ordered by type preference ("ebuild" type is the last resort)
5924                 return  matched_packages[-1], existing_node
5925
5926         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
5927                 """
5928                 Select packages that have already been added to the graph or
5929                 those that are installed and have not been scheduled for
5930                 replacement.
5931                 """
5932                 graph_db = self._graph_trees[root]["porttree"].dbapi
5933                 matches = graph_db.match(atom)
5934                 if not matches:
5935                         return None, None
5936                 cpv = matches[-1] # highest match
5937                 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5938                         graph_db.aux_get(cpv, ["SLOT"])[0])
5939                 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5940                 if e_pkg:
5941                         return e_pkg, e_pkg
5942                 # Since this cpv exists in the graph_db,
5943                 # we must have a cached Package instance.
5944                 cache_key = ("installed", root, cpv, "nomerge")
5945                 return (self._pkg_cache[cache_key], None)
5946
5947         def _complete_graph(self):
5948                 """
5949                 Add any deep dependencies of required sets (args, system, world) that
5950                 have not been pulled into the graph yet. This ensures that the graph
5951                 is consistent such that initially satisfied deep dependencies are not
5952                 broken in the new graph. Initially unsatisfied dependencies are
5953                 irrelevant since we only want to avoid breaking dependencies that are
5954                 intially satisfied.
5955
5956                 Since this method can consume enough time to disturb users, it is
5957                 currently only enabled by the --complete-graph option.
5958                 """
5959                 if "--buildpkgonly" in self.myopts or \
5960                         "recurse" not in self.myparams:
5961                         return 1
5962
5963                 if "complete" not in self.myparams:
5964                         # Skip this to avoid consuming enough time to disturb users.
5965                         return 1
5966
5967                 # Put the depgraph into a mode that causes it to only
5968                 # select packages that have already been added to the
5969                 # graph or those that are installed and have not been
5970                 # scheduled for replacement. Also, toggle the "deep"
5971                 # parameter so that all dependencies are traversed and
5972                 # accounted for.
5973                 self._select_atoms = self._select_atoms_from_graph
5974                 self._select_package = self._select_pkg_from_graph
5975                 already_deep = "deep" in self.myparams
5976                 if not already_deep:
5977                         self.myparams.add("deep")
5978
5979                 for root in self.roots:
5980                         required_set_names = self._required_set_names.copy()
5981                         if root == self.target_root and \
5982                                 (already_deep or "empty" in self.myparams):
5983                                 required_set_names.difference_update(self._sets)
5984                         if not required_set_names and not self._ignored_deps:
5985                                 continue
5986                         root_config = self.roots[root]
5987                         setconfig = root_config.setconfig
5988                         args = []
5989                         # Reuse existing SetArg instances when available.
5990                         for arg in self.digraph.root_nodes():
5991                                 if not isinstance(arg, SetArg):
5992                                         continue
5993                                 if arg.root_config != root_config:
5994                                         continue
5995                                 if arg.name in required_set_names:
5996                                         args.append(arg)
5997                                         required_set_names.remove(arg.name)
5998                         # Create new SetArg instances only when necessary.
5999                         for s in required_set_names:
6000                                 expanded_set = InternalPackageSet(
6001                                         initial_atoms=setconfig.getSetAtoms(s))
6002                                 atom = SETPREFIX + s
6003                                 args.append(SetArg(arg=atom, set=expanded_set,
6004                                         root_config=root_config))
6005                         vardb = root_config.trees["vartree"].dbapi
6006                         for arg in args:
6007                                 for atom in arg.set:
6008                                         self._dep_stack.append(
6009                                                 Dependency(atom=atom, root=root, parent=arg))
6010                         if self._ignored_deps:
6011                                 self._dep_stack.extend(self._ignored_deps)
6012                                 self._ignored_deps = []
6013                         if not self._create_graph(allow_unsatisfied=True):
6014                                 return 0
6015                         # Check the unsatisfied deps to see if any initially satisfied deps
6016                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6017                         # deps are irrelevant since we only want to avoid breaking deps
6018                         # that are initially satisfied.
6019                         while self._unsatisfied_deps:
6020                                 dep = self._unsatisfied_deps.pop()
6021                                 matches = vardb.match_pkgs(dep.atom)
6022                                 if not matches:
6023                                         self._initially_unsatisfied_deps.append(dep)
6024                                         continue
6025                                 # An scheduled installation broke a deep dependency.
6026                                 # Add the installed package to the graph so that it
6027                                 # will be appropriately reported as a slot collision
6028                                 # (possibly solvable via backtracking).
6029                                 pkg = matches[-1] # highest match
6030                                 if not self._add_pkg(pkg, dep):
6031                                         return 0
6032                                 if not self._create_graph(allow_unsatisfied=True):
6033                                         return 0
6034                 return 1
6035
6036         def _pkg(self, cpv, type_name, root_config, installed=False):
6037                 """
6038                 Get a package instance from the cache, or create a new
6039                 one if necessary. Raises KeyError from aux_get if it
6040                 failures for some reason (package does not exist or is
6041                 corrupt).
6042                 """
6043                 operation = "merge"
6044                 if installed:
6045                         operation = "nomerge"
6046                 pkg = self._pkg_cache.get(
6047                         (type_name, root_config.root, cpv, operation))
6048                 if pkg is None:
6049                         tree_type = self.pkg_tree_map[type_name]
6050                         db = root_config.trees[tree_type].dbapi
6051                         db_keys = list(self._trees_orig[root_config.root][
6052                                 tree_type].dbapi._aux_cache_keys)
6053                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6054                         pkg = Package(cpv=cpv, metadata=metadata,
6055                                 root_config=root_config, installed=installed)
6056                         if type_name == "ebuild":
6057                                 settings = self.pkgsettings[root_config.root]
6058                                 settings.setcpv(pkg)
6059                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6060                         self._pkg_cache[pkg] = pkg
6061                 return pkg
6062
6063         def validate_blockers(self):
6064                 """Remove any blockers from the digraph that do not match any of the
6065                 packages within the graph.  If necessary, create hard deps to ensure
6066                 correct merge order such that mutually blocking packages are never
6067                 installed simultaneously."""
6068
6069                 if "--buildpkgonly" in self.myopts or \
6070                         "--nodeps" in self.myopts:
6071                         return True
6072
6073                 #if "deep" in self.myparams:
6074                 if True:
6075                         # Pull in blockers from all installed packages that haven't already
6076                         # been pulled into the depgraph.  This is not enabled by default
6077                         # due to the performance penalty that is incurred by all the
6078                         # additional dep_check calls that are required.
6079
6080                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6081                         for myroot in self.trees:
6082                                 vardb = self.trees[myroot]["vartree"].dbapi
6083                                 portdb = self.trees[myroot]["porttree"].dbapi
6084                                 pkgsettings = self.pkgsettings[myroot]
6085                                 final_db = self.mydbapi[myroot]
6086
6087                                 blocker_cache = BlockerCache(myroot, vardb)
6088                                 stale_cache = set(blocker_cache)
6089                                 for pkg in vardb:
6090                                         cpv = pkg.cpv
6091                                         stale_cache.discard(cpv)
6092                                         pkg_in_graph = self.digraph.contains(pkg)
6093
6094                                         # Check for masked installed packages. Only warn about
6095                                         # packages that are in the graph in order to avoid warning
6096                                         # about those that will be automatically uninstalled during
6097                                         # the merge process or by --depclean.
6098                                         if pkg in final_db:
6099                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6100                                                         self._masked_installed.add(pkg)
6101
6102                                         blocker_atoms = None
6103                                         blockers = None
6104                                         if pkg_in_graph:
6105                                                 blockers = []
6106                                                 try:
6107                                                         blockers.extend(
6108                                                                 self._blocker_parents.child_nodes(pkg))
6109                                                 except KeyError:
6110                                                         pass
6111                                                 try:
6112                                                         blockers.extend(
6113                                                                 self._irrelevant_blockers.child_nodes(pkg))
6114                                                 except KeyError:
6115                                                         pass
6116                                         if blockers is not None:
6117                                                 blockers = set(str(blocker.atom) \
6118                                                         for blocker in blockers)
6119
6120                                         # If this node has any blockers, create a "nomerge"
6121                                         # node for it so that they can be enforced.
6122                                         self.spinner.update()
6123                                         blocker_data = blocker_cache.get(cpv)
6124                                         if blocker_data is not None and \
6125                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6126                                                 blocker_data = None
6127
6128                                         # If blocker data from the graph is available, use
6129                                         # it to validate the cache and update the cache if
6130                                         # it seems invalid.
6131                                         if blocker_data is not None and \
6132                                                 blockers is not None:
6133                                                 if not blockers.symmetric_difference(
6134                                                         blocker_data.atoms):
6135                                                         continue
6136                                                 blocker_data = None
6137
6138                                         if blocker_data is None and \
6139                                                 blockers is not None:
6140                                                 # Re-use the blockers from the graph.
6141                                                 blocker_atoms = sorted(blockers)
6142                                                 counter = long(pkg.metadata["COUNTER"])
6143                                                 blocker_data = \
6144                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6145                                                 blocker_cache[pkg.cpv] = blocker_data
6146                                                 continue
6147
6148                                         if blocker_data:
6149                                                 blocker_atoms = blocker_data.atoms
6150                                         else:
6151                                                 # Use aux_get() to trigger FakeVartree global
6152                                                 # updates on *DEPEND when appropriate.
6153                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6154                                                 # It is crucial to pass in final_db here in order to
6155                                                 # optimize dep_check calls by eliminating atoms via
6156                                                 # dep_wordreduce and dep_eval calls.
6157                                                 try:
6158                                                         portage.dep._dep_check_strict = False
6159                                                         try:
6160                                                                 success, atoms = portage.dep_check(depstr,
6161                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6162                                                                         trees=self._graph_trees, myroot=myroot)
6163                                                         except Exception, e:
6164                                                                 if isinstance(e, SystemExit):
6165                                                                         raise
6166                                                                 # This is helpful, for example, if a ValueError
6167                                                                 # is thrown from cpv_expand due to multiple
6168                                                                 # matches (this can happen if an atom lacks a
6169                                                                 # category).
6170                                                                 show_invalid_depstring_notice(
6171                                                                         pkg, depstr, str(e))
6172                                                                 del e
6173                                                                 raise
6174                                                 finally:
6175                                                         portage.dep._dep_check_strict = True
6176                                                 if not success:
6177                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6178                                                         if replacement_pkg and \
6179                                                                 replacement_pkg[0].operation == "merge":
6180                                                                 # This package is being replaced anyway, so
6181                                                                 # ignore invalid dependencies so as not to
6182                                                                 # annoy the user too much (otherwise they'd be
6183                                                                 # forced to manually unmerge it first).
6184                                                                 continue
6185                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6186                                                         return False
6187                                                 blocker_atoms = [myatom for myatom in atoms \
6188                                                         if myatom.startswith("!")]
6189                                                 blocker_atoms.sort()
6190                                                 counter = long(pkg.metadata["COUNTER"])
6191                                                 blocker_cache[cpv] = \
6192                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6193                                         if blocker_atoms:
6194                                                 try:
6195                                                         for atom in blocker_atoms:
6196                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6197                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6198                                                                 self._blocker_parents.add(blocker, pkg)
6199                                                 except portage.exception.InvalidAtom, e:
6200                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6201                                                         show_invalid_depstring_notice(
6202                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6203                                                         return False
6204                                 for cpv in stale_cache:
6205                                         del blocker_cache[cpv]
6206                                 blocker_cache.flush()
6207                                 del blocker_cache
6208
6209                 # Discard any "uninstall" tasks scheduled by previous calls
6210                 # to this method, since those tasks may not make sense given
6211                 # the current graph state.
6212                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6213                 if previous_uninstall_tasks:
6214                         self._blocker_uninstalls = digraph()
6215                         self.digraph.difference_update(previous_uninstall_tasks)
6216
6217                 for blocker in self._blocker_parents.leaf_nodes():
6218                         self.spinner.update()
6219                         root_config = self.roots[blocker.root]
6220                         virtuals = root_config.settings.getvirtuals()
6221                         myroot = blocker.root
6222                         initial_db = self.trees[myroot]["vartree"].dbapi
6223                         final_db = self.mydbapi[myroot]
6224                         
6225                         provider_virtual = False
6226                         if blocker.cp in virtuals and \
6227                                 not self._have_new_virt(blocker.root, blocker.cp):
6228                                 provider_virtual = True
6229
6230                         if provider_virtual:
6231                                 atoms = []
6232                                 for provider_entry in virtuals[blocker.cp]:
6233                                         provider_cp = \
6234                                                 portage.dep_getkey(provider_entry)
6235                                         atoms.append(blocker.atom.replace(
6236                                                 blocker.cp, provider_cp))
6237                         else:
6238                                 atoms = [blocker.atom]
6239
6240                         blocked_initial = []
6241                         for atom in atoms:
6242                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6243
6244                         blocked_final = []
6245                         for atom in atoms:
6246                                 blocked_final.extend(final_db.match_pkgs(atom))
6247
6248                         if not blocked_initial and not blocked_final:
6249                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6250                                 self._blocker_parents.remove(blocker)
6251                                 # Discard any parents that don't have any more blockers.
6252                                 for pkg in parent_pkgs:
6253                                         self._irrelevant_blockers.add(blocker, pkg)
6254                                         if not self._blocker_parents.child_nodes(pkg):
6255                                                 self._blocker_parents.remove(pkg)
6256                                 continue
6257                         for parent in self._blocker_parents.parent_nodes(blocker):
6258                                 unresolved_blocks = False
6259                                 depends_on_order = set()
6260                                 for pkg in blocked_initial:
6261                                         if pkg.slot_atom == parent.slot_atom:
6262                                                 # TODO: Support blocks within slots in cases where it
6263                                                 # might make sense.  For example, a new version might
6264                                                 # require that the old version be uninstalled at build
6265                                                 # time.
6266                                                 continue
6267                                         if parent.installed:
6268                                                 # Two currently installed packages conflict with
6269                                                 # eachother. Ignore this case since the damage
6270                                                 # is already done and this would be likely to
6271                                                 # confuse users if displayed like a normal blocker.
6272                                                 continue
6273                                         if parent.operation == "merge":
6274                                                 # Maybe the blocked package can be replaced or simply
6275                                                 # unmerged to resolve this block.
6276                                                 depends_on_order.add((pkg, parent))
6277                                                 continue
6278                                         # None of the above blocker resolutions techniques apply,
6279                                         # so apparently this one is unresolvable.
6280                                         unresolved_blocks = True
6281                                 for pkg in blocked_final:
6282                                         if pkg.slot_atom == parent.slot_atom:
6283                                                 # TODO: Support blocks within slots.
6284                                                 continue
6285                                         if parent.operation == "nomerge" and \
6286                                                 pkg.operation == "nomerge":
6287                                                 # This blocker will be handled the next time that a
6288                                                 # merge of either package is triggered.
6289                                                 continue
6290
6291                                         # Maybe the blocking package can be
6292                                         # unmerged to resolve this block.
6293                                         if parent.operation == "merge" and pkg.installed:
6294                                                 depends_on_order.add((pkg, parent))
6295                                                 continue
6296                                         elif parent.operation == "nomerge":
6297                                                 depends_on_order.add((parent, pkg))
6298                                                 continue
6299                                         # None of the above blocker resolutions techniques apply,
6300                                         # so apparently this one is unresolvable.
6301                                         unresolved_blocks = True
6302
6303                                 # Make sure we don't unmerge any package that have been pulled
6304                                 # into the graph.
6305                                 if not unresolved_blocks and depends_on_order:
6306                                         for inst_pkg, inst_task in depends_on_order:
6307                                                 if self.digraph.contains(inst_pkg) and \
6308                                                         self.digraph.parent_nodes(inst_pkg):
6309                                                         unresolved_blocks = True
6310                                                         break
6311
6312                                 if not unresolved_blocks and depends_on_order:
6313                                         for inst_pkg, inst_task in depends_on_order:
6314                                                 uninst_task = Package(built=inst_pkg.built,
6315                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6316                                                         metadata=inst_pkg.metadata,
6317                                                         operation="uninstall",
6318                                                         root_config=inst_pkg.root_config,
6319                                                         type_name=inst_pkg.type_name)
6320                                                 self._pkg_cache[uninst_task] = uninst_task
6321                                                 # Enforce correct merge order with a hard dep.
6322                                                 self.digraph.addnode(uninst_task, inst_task,
6323                                                         priority=BlockerDepPriority.instance)
6324                                                 # Count references to this blocker so that it can be
6325                                                 # invalidated after nodes referencing it have been
6326                                                 # merged.
6327                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6328                                 if not unresolved_blocks and not depends_on_order:
6329                                         self._irrelevant_blockers.add(blocker, parent)
6330                                         self._blocker_parents.remove_edge(blocker, parent)
6331                                         if not self._blocker_parents.parent_nodes(blocker):
6332                                                 self._blocker_parents.remove(blocker)
6333                                         if not self._blocker_parents.child_nodes(parent):
6334                                                 self._blocker_parents.remove(parent)
6335                                 if unresolved_blocks:
6336                                         self._unsolvable_blockers.add(blocker, parent)
6337
6338                 return True
6339
6340         def _accept_blocker_conflicts(self):
6341                 acceptable = False
6342                 for x in ("--buildpkgonly", "--fetchonly",
6343                         "--fetch-all-uri", "--nodeps", "--pretend"):
6344                         if x in self.myopts:
6345                                 acceptable = True
6346                                 break
6347                 return acceptable
6348
6349         def _merge_order_bias(self, mygraph):
6350                 """Order nodes from highest to lowest overall reference count for
6351                 optimal leaf node selection."""
6352                 node_info = {}
6353                 for node in mygraph.order:
6354                         node_info[node] = len(mygraph.parent_nodes(node))
6355                 def cmp_merge_preference(node1, node2):
6356                         return node_info[node2] - node_info[node1]
6357                 mygraph.order.sort(cmp_merge_preference)
6358
6359         def altlist(self, reversed=False):
6360
6361                 while self._serialized_tasks_cache is None:
6362                         self._resolve_conflicts()
6363                         try:
6364                                 self._serialized_tasks_cache, self._scheduler_graph = \
6365                                         self._serialize_tasks()
6366                         except self._serialize_tasks_retry:
6367                                 pass
6368
6369                 retlist = self._serialized_tasks_cache[:]
6370                 if reversed:
6371                         retlist.reverse()
6372                 return retlist
6373
6374         def schedulerGraph(self):
6375                 """
6376                 The scheduler graph is identical to the normal one except that
6377                 uninstall edges are reversed in specific cases that require
6378                 conflicting packages to be temporarily installed simultaneously.
6379                 This is intended for use by the Scheduler in it's parallelization
6380                 logic. It ensures that temporary simultaneous installation of
6381                 conflicting packages is avoided when appropriate (especially for
6382                 !!atom blockers), but allowed in specific cases that require it.
6383
6384                 Note that this method calls break_refs() which alters the state of
6385                 internal Package instances such that this depgraph instance should
6386                 not be used to perform any more calculations.
6387                 """
6388                 if self._scheduler_graph is None:
6389                         self.altlist()
6390                 self.break_refs(self._scheduler_graph.order)
6391                 return self._scheduler_graph
6392
6393         def break_refs(self, nodes):
6394                 """
6395                 Take a mergelist like that returned from self.altlist() and
6396                 break any references that lead back to the depgraph. This is
6397                 useful if you want to hold references to packages without
6398                 also holding the depgraph on the heap.
6399                 """
6400                 for node in nodes:
6401                         if hasattr(node, "root_config"):
6402                                 # The FakeVartree references the _package_cache which
6403                                 # references the depgraph. So that Package instances don't
6404                                 # hold the depgraph and FakeVartree on the heap, replace
6405                                 # the RootConfig that references the FakeVartree with the
6406                                 # original RootConfig instance which references the actual
6407                                 # vartree.
6408                                 node.root_config = \
6409                                         self._trees_orig[node.root_config.root]["root_config"]
6410
6411         def _resolve_conflicts(self):
6412                 if not self._complete_graph():
6413                         raise self._unknown_internal_error()
6414
6415                 if not self.validate_blockers():
6416                         raise self._unknown_internal_error()
6417
6418                 if self._slot_collision_info:
6419                         self._process_slot_conflicts()
6420
6421         def _serialize_tasks(self):
6422                 scheduler_graph = self.digraph.copy()
6423                 mygraph=self.digraph.copy()
6424                 # Prune "nomerge" root nodes if nothing depends on them, since
6425                 # otherwise they slow down merge order calculation. Don't remove
6426                 # non-root nodes since they help optimize merge order in some cases
6427                 # such as revdep-rebuild.
6428                 removed_nodes = set()
6429                 while True:
6430                         for node in mygraph.root_nodes():
6431                                 if not isinstance(node, Package) or \
6432                                         node.installed or node.onlydeps:
6433                                         removed_nodes.add(node)
6434                         if removed_nodes:
6435                                 self.spinner.update()
6436                                 mygraph.difference_update(removed_nodes)
6437                         if not removed_nodes:
6438                                 break
6439                         removed_nodes.clear()
6440                 self._merge_order_bias(mygraph)
6441                 def cmp_circular_bias(n1, n2):
6442                         """
6443                         RDEPEND is stronger than PDEPEND and this function
6444                         measures such a strength bias within a circular
6445                         dependency relationship.
6446                         """
6447                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6448                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6449                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6450                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6451                         if n1_n2_medium == n2_n1_medium:
6452                                 return 0
6453                         elif n1_n2_medium:
6454                                 return 1
6455                         return -1
6456                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6457                 retlist=[]
6458                 # Contains uninstall tasks that have been scheduled to
6459                 # occur after overlapping blockers have been installed.
6460                 scheduled_uninstalls = set()
6461                 # Contains any Uninstall tasks that have been ignored
6462                 # in order to avoid the circular deps code path. These
6463                 # correspond to blocker conflicts that could not be
6464                 # resolved.
6465                 ignored_uninstall_tasks = set()
6466                 have_uninstall_task = False
6467                 complete = "complete" in self.myparams
6468                 myblocker_parents = self._blocker_parents.copy()
6469                 asap_nodes = []
6470
6471                 def get_nodes(**kwargs):
6472                         """
6473                         Returns leaf nodes excluding Uninstall instances
6474                         since those should be executed as late as possible.
6475                         """
6476                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6477                                 if isinstance(node, Package) and \
6478                                         (node.operation != "uninstall" or \
6479                                         node in scheduled_uninstalls)]
6480
6481                 # sys-apps/portage needs special treatment if ROOT="/"
6482                 running_root = self._running_root.root
6483                 from portage.const import PORTAGE_PACKAGE_ATOM
6484                 runtime_deps = InternalPackageSet(
6485                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6486                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6487                         PORTAGE_PACKAGE_ATOM)
6488                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6489                         PORTAGE_PACKAGE_ATOM)
6490
6491                 if running_portage:
6492                         running_portage = running_portage[0]
6493                 else:
6494                         running_portage = None
6495
6496                 if replacement_portage:
6497                         replacement_portage = replacement_portage[0]
6498                 else:
6499                         replacement_portage = None
6500
6501                 if replacement_portage == running_portage:
6502                         replacement_portage = None
6503
6504                 if replacement_portage is not None:
6505                         # update from running_portage to replacement_portage asap
6506                         asap_nodes.append(replacement_portage)
6507
6508                 if running_portage is not None:
6509                         try:
6510                                 portage_rdepend = self._select_atoms_highest_available(
6511                                         running_root, running_portage.metadata["RDEPEND"],
6512                                         myuse=running_portage.use.enabled,
6513                                         parent=running_portage, strict=False)
6514                         except portage.exception.InvalidDependString, e:
6515                                 portage.writemsg("!!! Invalid RDEPEND in " + \
6516                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6517                                         (running_root, running_portage.cpv, e), noiselevel=-1)
6518                                 del e
6519                                 portage_rdepend = []
6520                         runtime_deps.update(atom for atom in portage_rdepend \
6521                                 if not atom.startswith("!"))
6522
6523                 ignore_priority_soft_range = [None]
6524                 ignore_priority_soft_range.extend(
6525                         xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6526                 tree_mode = "--tree" in self.myopts
6527                 # Tracks whether or not the current iteration should prefer asap_nodes
6528                 # if available.  This is set to False when the previous iteration
6529                 # failed to select any nodes.  It is reset whenever nodes are
6530                 # successfully selected.
6531                 prefer_asap = True
6532
6533                 # By default, try to avoid selecting root nodes whenever possible. This
6534                 # helps ensure that the maximimum possible number of soft dependencies
6535                 # have been removed from the graph before their parent nodes have
6536                 # selected. This is especially important when those dependencies are
6537                 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6538                 # CHOST has been changed (like when building a stage3 from a stage2).
6539                 accept_root_node = False
6540
6541                 # State of prefer_asap and accept_root_node flags for successive
6542                 # iterations that loosen the criteria for node selection.
6543                 #
6544                 # iteration   prefer_asap   accept_root_node
6545                 # 1           True          False
6546                 # 2           False         False
6547                 # 3           False         True
6548                 #
6549                 # If no nodes are selected on the 3rd iteration, it is due to
6550                 # unresolved blockers or circular dependencies.
6551
6552                 while not mygraph.empty():
6553                         self.spinner.update()
6554                         selected_nodes = None
6555                         ignore_priority = None
6556                         if prefer_asap and asap_nodes:
6557                                 """ASAP nodes are merged before their soft deps."""
6558                                 asap_nodes = [node for node in asap_nodes \
6559                                         if mygraph.contains(node)]
6560                                 for node in asap_nodes:
6561                                         if not mygraph.child_nodes(node,
6562                                                 ignore_priority=DepPriority.SOFT):
6563                                                 selected_nodes = [node]
6564                                                 asap_nodes.remove(node)
6565                                                 break
6566                         if not selected_nodes and \
6567                                 not (prefer_asap and asap_nodes):
6568                                 for ignore_priority in ignore_priority_soft_range:
6569                                         nodes = get_nodes(ignore_priority=ignore_priority)
6570                                         if nodes:
6571                                                 break
6572                                 if nodes:
6573                                         if ignore_priority is None and not tree_mode:
6574                                                 # Greedily pop all of these nodes since no relationship
6575                                                 # has been ignored.  This optimization destroys --tree
6576                                                 # output, so it's disabled in reversed mode. If there
6577                                                 # is a mix of merge and uninstall nodes, save the
6578                                                 # uninstall nodes from later since sometimes a merge
6579                                                 # node will render an install node unnecessary, and
6580                                                 # we want to avoid doing a separate uninstall task in
6581                                                 # that case.
6582                                                 merge_nodes = [node for node in nodes \
6583                                                         if node.operation == "merge"]
6584                                                 if merge_nodes:
6585                                                         selected_nodes = merge_nodes
6586                                                 else:
6587                                                         selected_nodes = nodes
6588                                         else:
6589                                                 # For optimal merge order:
6590                                                 #  * Only pop one node.
6591                                                 #  * Removing a root node (node without a parent)
6592                                                 #    will not produce a leaf node, so avoid it.
6593                                                 for node in nodes:
6594                                                         if mygraph.parent_nodes(node):
6595                                                                 # found a non-root node
6596                                                                 selected_nodes = [node]
6597                                                                 break
6598                                                 if not selected_nodes and \
6599                                                         (accept_root_node or ignore_priority is None):
6600                                                         # settle for a root node
6601                                                         selected_nodes = [nodes[0]]
6602
6603                         if not selected_nodes:
6604                                 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6605                                 if nodes:
6606                                         """Recursively gather a group of nodes that RDEPEND on
6607                                         eachother.  This ensures that they are merged as a group
6608                                         and get their RDEPENDs satisfied as soon as possible."""
6609                                         def gather_deps(ignore_priority,
6610                                                 mergeable_nodes, selected_nodes, node):
6611                                                 if node in selected_nodes:
6612                                                         return True
6613                                                 if node not in mergeable_nodes:
6614                                                         return False
6615                                                 if node == replacement_portage and \
6616                                                         mygraph.child_nodes(node,
6617                                                         ignore_priority=DepPriority.MEDIUM_SOFT):
6618                                                         # Make sure that portage always has all of it's
6619                                                         # RDEPENDs installed first.
6620                                                         return False
6621                                                 selected_nodes.add(node)
6622                                                 for child in mygraph.child_nodes(node,
6623                                                         ignore_priority=ignore_priority):
6624                                                         if not gather_deps(ignore_priority,
6625                                                                 mergeable_nodes, selected_nodes, child):
6626                                                                 return False
6627                                                 return True
6628                                         mergeable_nodes = set(nodes)
6629                                         if prefer_asap and asap_nodes:
6630                                                 nodes = asap_nodes
6631                                         for ignore_priority in xrange(DepPriority.SOFT,
6632                                                 DepPriority.MEDIUM_SOFT + 1):
6633                                                 for node in nodes:
6634                                                         if nodes is not asap_nodes and \
6635                                                                 not accept_root_node and \
6636                                                                 not mygraph.parent_nodes(node):
6637                                                                 continue
6638                                                         selected_nodes = set()
6639                                                         if gather_deps(ignore_priority,
6640                                                                 mergeable_nodes, selected_nodes, node):
6641                                                                 break
6642                                                         else:
6643                                                                 selected_nodes = None
6644                                                 if selected_nodes:
6645                                                         break
6646
6647                                         # If any nodes have been selected here, it's always
6648                                         # possible that anything up to a MEDIUM_SOFT priority
6649                                         # relationship has been ignored. This state is recorded
6650                                         # in ignore_priority so that relevant nodes will be
6651                                         # added to asap_nodes when appropriate.
6652                                         if selected_nodes:
6653                                                 ignore_priority = DepPriority.MEDIUM_SOFT
6654
6655                                         if prefer_asap and asap_nodes and not selected_nodes:
6656                                                 # We failed to find any asap nodes to merge, so ignore
6657                                                 # them for the next iteration.
6658                                                 prefer_asap = False
6659                                                 continue
6660
6661                                         if not selected_nodes and not accept_root_node:
6662                                                 # Maybe there are only root nodes left, so accept them
6663                                                 # for the next iteration.
6664                                                 accept_root_node = True
6665                                                 continue
6666
6667                         if selected_nodes and ignore_priority > DepPriority.SOFT:
6668                                 # Try to merge ignored medium deps as soon as possible.
6669                                 for node in selected_nodes:
6670                                         children = set(mygraph.child_nodes(node))
6671                                         soft = children.difference(
6672                                                 mygraph.child_nodes(node,
6673                                                 ignore_priority=DepPriority.SOFT))
6674                                         medium_soft = children.difference(
6675                                                 mygraph.child_nodes(node,
6676                                                 ignore_priority=DepPriority.MEDIUM_SOFT))
6677                                         medium_soft.difference_update(soft)
6678                                         for child in medium_soft:
6679                                                 if child in selected_nodes:
6680                                                         continue
6681                                                 if child in asap_nodes:
6682                                                         continue
6683                                                 asap_nodes.append(child)
6684
6685                         if selected_nodes and len(selected_nodes) > 1:
6686                                 if not isinstance(selected_nodes, list):
6687                                         selected_nodes = list(selected_nodes)
6688                                 selected_nodes.sort(cmp_circular_bias)
6689
6690                         if not selected_nodes and not myblocker_uninstalls.is_empty():
6691                                 # An Uninstall task needs to be executed in order to
6692                                 # avoid conflict if possible.
6693                                 min_parent_deps = None
6694                                 uninst_task = None
6695                                 for task in myblocker_uninstalls.leaf_nodes():
6696                                         # Do some sanity checks so that system or world packages
6697                                         # don't get uninstalled inappropriately here (only really
6698                                         # necessary when --complete-graph has not been enabled).
6699
6700                                         if task in ignored_uninstall_tasks:
6701                                                 continue
6702
6703                                         if task in scheduled_uninstalls:
6704                                                 # It's been scheduled but it hasn't
6705                                                 # been executed yet due to dependence
6706                                                 # on installation of blocking packages.
6707                                                 continue
6708
6709                                         root_config = self.roots[task.root]
6710                                         inst_pkg = self._pkg_cache[
6711                                                 ("installed", task.root, task.cpv, "nomerge")]
6712
6713                                         if self.digraph.contains(inst_pkg):
6714                                                 continue
6715
6716                                         forbid_overlap = False
6717                                         heuristic_overlap = False
6718                                         for blocker in myblocker_uninstalls.parent_nodes(task):
6719                                                 if blocker.eapi in ("0", "1"):
6720                                                         heuristic_overlap = True
6721                                                 elif blocker.atom.blocker.overlap.forbid:
6722                                                         forbid_overlap = True
6723                                                         break
6724                                         if forbid_overlap and running_root == task.root:
6725                                                 continue
6726
6727                                         if heuristic_overlap and running_root == task.root:
6728                                                 # Never uninstall sys-apps/portage or it's essential
6729                                                 # dependencies, except through replacement.
6730                                                 try:
6731                                                         runtime_dep_atoms = \
6732                                                                 list(runtime_deps.iterAtomsForPackage(task))
6733                                                 except portage.exception.InvalidDependString, e:
6734                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6735                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6736                                                                 (task.root, task.cpv, e), noiselevel=-1)
6737                                                         del e
6738                                                         continue
6739
6740                                                 # Don't uninstall a runtime dep if it appears
6741                                                 # to be the only suitable one installed.
6742                                                 skip = False
6743                                                 vardb = root_config.trees["vartree"].dbapi
6744                                                 for atom in runtime_dep_atoms:
6745                                                         other_version = None
6746                                                         for pkg in vardb.match_pkgs(atom):
6747                                                                 if pkg.cpv == task.cpv and \
6748                                                                         pkg.metadata["COUNTER"] == \
6749                                                                         task.metadata["COUNTER"]:
6750                                                                         continue
6751                                                                 other_version = pkg
6752                                                                 break
6753                                                         if other_version is None:
6754                                                                 skip = True
6755                                                                 break
6756                                                 if skip:
6757                                                         continue
6758
6759                                                 # For packages in the system set, don't take
6760                                                 # any chances. If the conflict can't be resolved
6761                                                 # by a normal replacement operation then abort.
6762                                                 skip = False
6763                                                 try:
6764                                                         for atom in root_config.sets[
6765                                                                 "system"].iterAtomsForPackage(task):
6766                                                                 skip = True
6767                                                                 break
6768                                                 except portage.exception.InvalidDependString, e:
6769                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6770                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6771                                                                 (task.root, task.cpv, e), noiselevel=-1)
6772                                                         del e
6773                                                         skip = True
6774                                                 if skip:
6775                                                         continue
6776
6777                                         # Note that the world check isn't always
6778                                         # necessary since self._complete_graph() will
6779                                         # add all packages from the system and world sets to the
6780                                         # graph. This just allows unresolved conflicts to be
6781                                         # detected as early as possible, which makes it possible
6782                                         # to avoid calling self._complete_graph() when it is
6783                                         # unnecessary due to blockers triggering an abortion.
6784                                         if not complete:
6785                                                 # For packages in the world set, go ahead an uninstall
6786                                                 # when necessary, as long as the atom will be satisfied
6787                                                 # in the final state.
6788                                                 graph_db = self.mydbapi[task.root]
6789                                                 skip = False
6790                                                 try:
6791                                                         for atom in root_config.sets[
6792                                                                 "world"].iterAtomsForPackage(task):
6793                                                                 satisfied = False
6794                                                                 for pkg in graph_db.match_pkgs(atom):
6795                                                                         if pkg == inst_pkg:
6796                                                                                 continue
6797                                                                         satisfied = True
6798                                                                         break
6799                                                                 if not satisfied:
6800                                                                         skip = True
6801                                                                         break
6802                                                 except portage.exception.InvalidDependString, e:
6803                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6804                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6805                                                                 (task.root, task.cpv, e), noiselevel=-1)
6806                                                         del e
6807                                                         skip = True
6808                                                 if skip:
6809                                                         continue
6810
6811                                         # Check the deps of parent nodes to ensure that
6812                                         # the chosen task produces a leaf node. Maybe
6813                                         # this can be optimized some more to make the
6814                                         # best possible choice, but the current algorithm
6815                                         # is simple and should be near optimal for most
6816                                         # common cases.
6817                                         parent_deps = set()
6818                                         for parent in mygraph.parent_nodes(task):
6819                                                 parent_deps.update(mygraph.child_nodes(parent,
6820                                                         ignore_priority=DepPriority.MEDIUM_SOFT))
6821                                         parent_deps.remove(task)
6822                                         if min_parent_deps is None or \
6823                                                 len(parent_deps) < min_parent_deps:
6824                                                 min_parent_deps = len(parent_deps)
6825                                                 uninst_task = task
6826
6827                                 if uninst_task is not None:
6828                                         # The uninstall is performed only after blocking
6829                                         # packages have been merged on top of it. File
6830                                         # collisions between blocking packages are detected
6831                                         # and removed from the list of files to be uninstalled.
6832                                         scheduled_uninstalls.add(uninst_task)
6833                                         parent_nodes = mygraph.parent_nodes(uninst_task)
6834
6835                                         # Reverse the parent -> uninstall edges since we want
6836                                         # to do the uninstall after blocking packages have
6837                                         # been merged on top of it.
6838                                         mygraph.remove(uninst_task)
6839                                         for blocked_pkg in parent_nodes:
6840                                                 mygraph.add(blocked_pkg, uninst_task,
6841                                                         priority=BlockerDepPriority.instance)
6842                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6843                                                 scheduler_graph.add(blocked_pkg, uninst_task,
6844                                                         priority=BlockerDepPriority.instance)
6845
6846                                 else:
6847                                         # None of the Uninstall tasks are acceptable, so
6848                                         # the corresponding blockers are unresolvable.
6849                                         # We need to drop an Uninstall task here in order
6850                                         # to avoid the circular deps code path, but the
6851                                         # blocker will still be counted as an unresolved
6852                                         # conflict.
6853                                         for node in myblocker_uninstalls.leaf_nodes():
6854                                                 try:
6855                                                         mygraph.remove(node)
6856                                                 except KeyError:
6857                                                         pass
6858                                                 else:
6859                                                         ignored_uninstall_tasks.add(node)
6860                                                         break
6861
6862                                 # After dropping an Uninstall task, reset
6863                                 # the state variables for leaf node selection and
6864                                 # continue trying to select leaf nodes.
6865                                 prefer_asap = True
6866                                 accept_root_node = False
6867                                 continue
6868
6869                         if not selected_nodes:
6870                                 self._circular_deps_for_display = mygraph
6871                                 raise self._unknown_internal_error()
6872
6873                         # At this point, we've succeeded in selecting one or more nodes, so
6874                         # it's now safe to reset the prefer_asap and accept_root_node flags
6875                         # to their default states.
6876                         prefer_asap = True
6877                         accept_root_node = False
6878
6879                         mygraph.difference_update(selected_nodes)
6880
6881                         for node in selected_nodes:
6882                                 if isinstance(node, Package) and \
6883                                         node.operation == "nomerge":
6884                                         continue
6885
6886                                 # Handle interactions between blockers
6887                                 # and uninstallation tasks.
6888                                 solved_blockers = set()
6889                                 uninst_task = None
6890                                 if isinstance(node, Package) and \
6891                                         "uninstall" == node.operation:
6892                                         have_uninstall_task = True
6893                                         uninst_task = node
6894                                 else:
6895                                         vardb = self.trees[node.root]["vartree"].dbapi
6896                                         previous_cpv = vardb.match(node.slot_atom)
6897                                         if previous_cpv:
6898                                                 # The package will be replaced by this one, so remove
6899                                                 # the corresponding Uninstall task if necessary.
6900                                                 previous_cpv = previous_cpv[0]
6901                                                 uninst_task = \
6902                                                         ("installed", node.root, previous_cpv, "uninstall")
6903                                                 try:
6904                                                         mygraph.remove(uninst_task)
6905                                                 except KeyError:
6906                                                         pass
6907
6908                                 if uninst_task is not None and \
6909                                         uninst_task not in ignored_uninstall_tasks and \
6910                                         myblocker_uninstalls.contains(uninst_task):
6911                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6912                                         myblocker_uninstalls.remove(uninst_task)
6913                                         # Discard any blockers that this Uninstall solves.
6914                                         for blocker in blocker_nodes:
6915                                                 if not myblocker_uninstalls.child_nodes(blocker):
6916                                                         myblocker_uninstalls.remove(blocker)
6917                                                         solved_blockers.add(blocker)
6918
6919                                 retlist.append(node)
6920
6921                                 if (isinstance(node, Package) and \
6922                                         "uninstall" == node.operation) or \
6923                                         (uninst_task is not None and \
6924                                         uninst_task in scheduled_uninstalls):
6925                                         # Include satisfied blockers in the merge list
6926                                         # since the user might be interested and also
6927                                         # it serves as an indicator that blocking packages
6928                                         # will be temporarily installed simultaneously.
6929                                         for blocker in solved_blockers:
6930                                                 retlist.append(Blocker(atom=blocker.atom,
6931                                                         root=blocker.root, eapi=blocker.eapi,
6932                                                         satisfied=True))
6933
6934                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6935                 for node in myblocker_uninstalls.root_nodes():
6936                         unsolvable_blockers.add(node)
6937
6938                 for blocker in unsolvable_blockers:
6939                         retlist.append(blocker)
6940
6941                 # If any Uninstall tasks need to be executed in order
6942                 # to avoid a conflict, complete the graph with any
6943                 # dependencies that may have been initially
6944                 # neglected (to ensure that unsafe Uninstall tasks
6945                 # are properly identified and blocked from execution).
6946                 if have_uninstall_task and \
6947                         not complete and \
6948                         not unsolvable_blockers:
6949                         self.myparams.add("complete")
6950                         raise self._serialize_tasks_retry("")
6951
6952                 if unsolvable_blockers and \
6953                         not self._accept_blocker_conflicts():
6954                         self._unsatisfied_blockers_for_display = unsolvable_blockers
6955                         self._serialized_tasks_cache = retlist[:]
6956                         self._scheduler_graph = scheduler_graph
6957                         raise self._unknown_internal_error()
6958
6959                 if self._slot_collision_info and \
6960                         not self._accept_blocker_conflicts():
6961                         self._serialized_tasks_cache = retlist[:]
6962                         self._scheduler_graph = scheduler_graph
6963                         raise self._unknown_internal_error()
6964
6965                 return retlist, scheduler_graph
6966
6967         def _show_circular_deps(self, mygraph):
6968                 # No leaf nodes are available, so we have a circular
6969                 # dependency panic situation.  Reduce the noise level to a
6970                 # minimum via repeated elimination of root nodes since they
6971                 # have no parents and thus can not be part of a cycle.
6972                 while True:
6973                         root_nodes = mygraph.root_nodes(
6974                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6975                         if not root_nodes:
6976                                 break
6977                         mygraph.difference_update(root_nodes)
6978                 # Display the USE flags that are enabled on nodes that are part
6979                 # of dependency cycles in case that helps the user decide to
6980                 # disable some of them.
6981                 display_order = []
6982                 tempgraph = mygraph.copy()
6983                 while not tempgraph.empty():
6984                         nodes = tempgraph.leaf_nodes()
6985                         if not nodes:
6986                                 node = tempgraph.order[0]
6987                         else:
6988                                 node = nodes[0]
6989                         display_order.append(node)
6990                         tempgraph.remove(node)
6991                 display_order.reverse()
6992                 self.myopts.pop("--quiet", None)
6993                 self.myopts.pop("--verbose", None)
6994                 self.myopts["--tree"] = True
6995                 portage.writemsg("\n\n", noiselevel=-1)
6996                 self.display(display_order)
6997                 prefix = colorize("BAD", " * ")
6998                 portage.writemsg("\n", noiselevel=-1)
6999                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7000                         noiselevel=-1)
7001                 portage.writemsg("\n", noiselevel=-1)
7002                 mygraph.debug_print()
7003                 portage.writemsg("\n", noiselevel=-1)
7004                 portage.writemsg(prefix + "Note that circular dependencies " + \
7005                         "can often be avoided by temporarily\n", noiselevel=-1)
7006                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7007                         "optional dependencies.\n", noiselevel=-1)
7008
7009         def _show_merge_list(self):
7010                 if self._serialized_tasks_cache is not None and \
7011                         not (self._displayed_list and \
7012                         (self._displayed_list == self._serialized_tasks_cache or \
7013                         self._displayed_list == \
7014                                 list(reversed(self._serialized_tasks_cache)))):
7015                         display_list = self._serialized_tasks_cache[:]
7016                         if "--tree" in self.myopts:
7017                                 display_list.reverse()
7018                         self.display(display_list)
7019
7020         def _show_unsatisfied_blockers(self, blockers):
7021                 self._show_merge_list()
7022                 msg = "Error: The above package list contains " + \
7023                         "packages which cannot be installed " + \
7024                         "at the same time on the same system."
7025                 prefix = colorize("BAD", " * ")
7026                 from textwrap import wrap
7027                 portage.writemsg("\n", noiselevel=-1)
7028                 for line in wrap(msg, 70):
7029                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7030                 if "--quiet" not in self.myopts:
7031                         show_blocker_docs_link()
7032
7033         def display(self, mylist, favorites=[], verbosity=None):
7034
7035                 # This is used to prevent display_problems() from
7036                 # redundantly displaying this exact same merge list
7037                 # again via _show_merge_list().
7038                 self._displayed_list = mylist
7039
7040                 if verbosity is None:
7041                         verbosity = ("--quiet" in self.myopts and 1 or \
7042                                 "--verbose" in self.myopts and 3 or 2)
7043                 favorites_set = InternalPackageSet(favorites)
7044                 oneshot = "--oneshot" in self.myopts or \
7045                         "--onlydeps" in self.myopts
7046                 columns = "--columns" in self.myopts
7047                 changelogs=[]
7048                 p=[]
7049                 blockers = []
7050
7051                 counters = PackageCounters()
7052
7053                 if verbosity == 1 and "--verbose" not in self.myopts:
7054                         def create_use_string(*args):
7055                                 return ""
7056                 else:
7057                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7058                                 old_iuse, old_use,
7059                                 is_new, reinst_flags,
7060                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7061                                 alphabetical=("--alphabetical" in self.myopts)):
7062                                 enabled = []
7063                                 if alphabetical:
7064                                         disabled = enabled
7065                                         removed = enabled
7066                                 else:
7067                                         disabled = []
7068                                         removed = []
7069                                 cur_iuse = set(cur_iuse)
7070                                 enabled_flags = cur_iuse.intersection(cur_use)
7071                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7072                                 any_iuse = cur_iuse.union(old_iuse)
7073                                 any_iuse = list(any_iuse)
7074                                 any_iuse.sort()
7075                                 for flag in any_iuse:
7076                                         flag_str = None
7077                                         isEnabled = False
7078                                         reinst_flag = reinst_flags and flag in reinst_flags
7079                                         if flag in enabled_flags:
7080                                                 isEnabled = True
7081                                                 if is_new or flag in old_use and \
7082                                                         (all_flags or reinst_flag):
7083                                                         flag_str = red(flag)
7084                                                 elif flag not in old_iuse:
7085                                                         flag_str = yellow(flag) + "%*"
7086                                                 elif flag not in old_use:
7087                                                         flag_str = green(flag) + "*"
7088                                         elif flag in removed_iuse:
7089                                                 if all_flags or reinst_flag:
7090                                                         flag_str = yellow("-" + flag) + "%"
7091                                                         if flag in old_use:
7092                                                                 flag_str += "*"
7093                                                         flag_str = "(" + flag_str + ")"
7094                                                         removed.append(flag_str)
7095                                                 continue
7096                                         else:
7097                                                 if is_new or flag in old_iuse and \
7098                                                         flag not in old_use and \
7099                                                         (all_flags or reinst_flag):
7100                                                         flag_str = blue("-" + flag)
7101                                                 elif flag not in old_iuse:
7102                                                         flag_str = yellow("-" + flag)
7103                                                         if flag not in iuse_forced:
7104                                                                 flag_str += "%"
7105                                                 elif flag in old_use:
7106                                                         flag_str = green("-" + flag) + "*"
7107                                         if flag_str:
7108                                                 if flag in iuse_forced:
7109                                                         flag_str = "(" + flag_str + ")"
7110                                                 if isEnabled:
7111                                                         enabled.append(flag_str)
7112                                                 else:
7113                                                         disabled.append(flag_str)
7114
7115                                 if alphabetical:
7116                                         ret = " ".join(enabled)
7117                                 else:
7118                                         ret = " ".join(enabled + disabled + removed)
7119                                 if ret:
7120                                         ret = '%s="%s" ' % (name, ret)
7121                                 return ret
7122
7123                 repo_display = RepoDisplay(self.roots)
7124
7125                 tree_nodes = []
7126                 display_list = []
7127                 mygraph = self.digraph.copy()
7128
7129                 # If there are any Uninstall instances, add the corresponding
7130                 # blockers to the digraph (useful for --tree display).
7131
7132                 executed_uninstalls = set(node for node in mylist \
7133                         if isinstance(node, Package) and node.operation == "unmerge")
7134
7135                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7136                         uninstall_parents = \
7137                                 self._blocker_uninstalls.parent_nodes(uninstall)
7138                         if not uninstall_parents:
7139                                 continue
7140
7141                         # Remove the corresponding "nomerge" node and substitute
7142                         # the Uninstall node.
7143                         inst_pkg = self._pkg_cache[
7144                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7145                         try:
7146                                 mygraph.remove(inst_pkg)
7147                         except KeyError:
7148                                 pass
7149
7150                         try:
7151                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7152                         except KeyError:
7153                                 inst_pkg_blockers = []
7154
7155                         # Break the Package -> Uninstall edges.
7156                         mygraph.remove(uninstall)
7157
7158                         # Resolution of a package's blockers
7159                         # depend on it's own uninstallation.
7160                         for blocker in inst_pkg_blockers:
7161                                 mygraph.add(uninstall, blocker)
7162
7163                         # Expand Package -> Uninstall edges into
7164                         # Package -> Blocker -> Uninstall edges.
7165                         for blocker in uninstall_parents:
7166                                 mygraph.add(uninstall, blocker)
7167                                 for parent in self._blocker_parents.parent_nodes(blocker):
7168                                         if parent != inst_pkg:
7169                                                 mygraph.add(blocker, parent)
7170
7171                         # If the uninstall task did not need to be executed because
7172                         # of an upgrade, display Blocker -> Upgrade edges since the
7173                         # corresponding Blocker -> Uninstall edges will not be shown.
7174                         upgrade_node = \
7175                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7176                         if upgrade_node is not None and \
7177                                 uninstall not in executed_uninstalls:
7178                                 for blocker in uninstall_parents:
7179                                         mygraph.add(upgrade_node, blocker)
7180
7181                 unsatisfied_blockers = []
7182                 i = 0
7183                 depth = 0
7184                 shown_edges = set()
7185                 for x in mylist:
7186                         if isinstance(x, Blocker) and not x.satisfied:
7187                                 unsatisfied_blockers.append(x)
7188                                 continue
7189                         graph_key = x
7190                         if "--tree" in self.myopts:
7191                                 depth = len(tree_nodes)
7192                                 while depth and graph_key not in \
7193                                         mygraph.child_nodes(tree_nodes[depth-1]):
7194                                                 depth -= 1
7195                                 if depth:
7196                                         tree_nodes = tree_nodes[:depth]
7197                                         tree_nodes.append(graph_key)
7198                                         display_list.append((x, depth, True))
7199                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7200                                 else:
7201                                         traversed_nodes = set() # prevent endless circles
7202                                         traversed_nodes.add(graph_key)
7203                                         def add_parents(current_node, ordered):
7204                                                 parent_nodes = None
7205                                                 # Do not traverse to parents if this node is an
7206                                                 # an argument or a direct member of a set that has
7207                                                 # been specified as an argument (system or world).
7208                                                 if current_node not in self._set_nodes:
7209                                                         parent_nodes = mygraph.parent_nodes(current_node)
7210                                                 if parent_nodes:
7211                                                         child_nodes = set(mygraph.child_nodes(current_node))
7212                                                         selected_parent = None
7213                                                         # First, try to avoid a direct cycle.
7214                                                         for node in parent_nodes:
7215                                                                 if not isinstance(node, (Blocker, Package)):
7216                                                                         continue
7217                                                                 if node not in traversed_nodes and \
7218                                                                         node not in child_nodes:
7219                                                                         edge = (current_node, node)
7220                                                                         if edge in shown_edges:
7221                                                                                 continue
7222                                                                         selected_parent = node
7223                                                                         break
7224                                                         if not selected_parent:
7225                                                                 # A direct cycle is unavoidable.
7226                                                                 for node in parent_nodes:
7227                                                                         if not isinstance(node, (Blocker, Package)):
7228                                                                                 continue
7229                                                                         if node not in traversed_nodes:
7230                                                                                 edge = (current_node, node)
7231                                                                                 if edge in shown_edges:
7232                                                                                         continue
7233                                                                                 selected_parent = node
7234                                                                                 break
7235                                                         if selected_parent:
7236                                                                 shown_edges.add((current_node, selected_parent))
7237                                                                 traversed_nodes.add(selected_parent)
7238                                                                 add_parents(selected_parent, False)
7239                                                 display_list.append((current_node,
7240                                                         len(tree_nodes), ordered))
7241                                                 tree_nodes.append(current_node)
7242                                         tree_nodes = []
7243                                         add_parents(graph_key, True)
7244                         else:
7245                                 display_list.append((x, depth, True))
7246                 mylist = display_list
7247                 for x in unsatisfied_blockers:
7248                         mylist.append((x, 0, True))
7249
7250                 last_merge_depth = 0
7251                 for i in xrange(len(mylist)-1,-1,-1):
7252                         graph_key, depth, ordered = mylist[i]
7253                         if not ordered and depth == 0 and i > 0 \
7254                                 and graph_key == mylist[i-1][0] and \
7255                                 mylist[i-1][1] == 0:
7256                                 # An ordered node got a consecutive duplicate when the tree was
7257                                 # being filled in.
7258                                 del mylist[i]
7259                                 continue
7260                         if ordered and graph_key[-1] != "nomerge":
7261                                 last_merge_depth = depth
7262                                 continue
7263                         if depth >= last_merge_depth or \
7264                                 i < len(mylist) - 1 and \
7265                                 depth >= mylist[i+1][1]:
7266                                         del mylist[i]
7267
7268                 from portage import flatten
7269                 from portage.dep import use_reduce, paren_reduce
7270                 # files to fetch list - avoids counting a same file twice
7271                 # in size display (verbose mode)
7272                 myfetchlist=[]
7273
7274                 # Use this set to detect when all the "repoadd" strings are "[0]"
7275                 # and disable the entire repo display in this case.
7276                 repoadd_set = set()
7277
7278                 for mylist_index in xrange(len(mylist)):
7279                         x, depth, ordered = mylist[mylist_index]
7280                         pkg_type = x[0]
7281                         myroot = x[1]
7282                         pkg_key = x[2]
7283                         portdb = self.trees[myroot]["porttree"].dbapi
7284                         bindb  = self.trees[myroot]["bintree"].dbapi
7285                         vardb = self.trees[myroot]["vartree"].dbapi
7286                         vartree = self.trees[myroot]["vartree"]
7287                         pkgsettings = self.pkgsettings[myroot]
7288
7289                         fetch=" "
7290                         indent = " " * depth
7291
7292                         if isinstance(x, Blocker):
7293                                 if x.satisfied:
7294                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7295                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7296                                 else:
7297                                         blocker_style = "PKG_BLOCKER"
7298                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7299                                 if ordered:
7300                                         counters.blocks += 1
7301                                         if x.satisfied:
7302                                                 counters.blocks_satisfied += 1
7303                                 resolved = portage.key_expand(
7304                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7305                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7306                                         addl += " " + colorize(blocker_style, resolved)
7307                                 else:
7308                                         addl = "[%s %s] %s%s" % \
7309                                                 (colorize(blocker_style, "blocks"),
7310                                                 addl, indent, colorize(blocker_style, resolved))
7311                                 block_parents = self._blocker_parents.parent_nodes(x)
7312                                 block_parents = set([pnode[2] for pnode in block_parents])
7313                                 block_parents = ", ".join(block_parents)
7314                                 if resolved!=x[2]:
7315                                         addl += colorize(blocker_style,
7316                                                 " (\"%s\" is blocking %s)") % \
7317                                                 (str(x.atom).lstrip("!"), block_parents)
7318                                 else:
7319                                         addl += colorize(blocker_style,
7320                                                 " (is blocking %s)") % block_parents
7321                                 if isinstance(x, Blocker) and x.satisfied:
7322                                         if columns:
7323                                                 continue
7324                                         p.append(addl)
7325                                 else:
7326                                         blockers.append(addl)
7327                         else:
7328                                 pkg_status = x[3]
7329                                 pkg_merge = ordered and pkg_status == "merge"
7330                                 if not pkg_merge and pkg_status == "merge":
7331                                         pkg_status = "nomerge"
7332                                 built = pkg_type != "ebuild"
7333                                 installed = pkg_type == "installed"
7334                                 pkg = x
7335                                 metadata = pkg.metadata
7336                                 ebuild_path = None
7337                                 repo_name = metadata["repository"]
7338                                 if pkg_type == "ebuild":
7339                                         ebuild_path = portdb.findname(pkg_key)
7340                                         if not ebuild_path: # shouldn't happen
7341                                                 raise portage.exception.PackageNotFound(pkg_key)
7342                                         repo_path_real = os.path.dirname(os.path.dirname(
7343                                                 os.path.dirname(ebuild_path)))
7344                                 else:
7345                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7346                                 pkg_use = list(pkg.use.enabled)
7347                                 try:
7348                                         restrict = flatten(use_reduce(paren_reduce(
7349                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7350                                 except portage.exception.InvalidDependString, e:
7351                                         if not pkg.installed:
7352                                                 show_invalid_depstring_notice(x,
7353                                                         pkg.metadata["RESTRICT"], str(e))
7354                                                 del e
7355                                                 return 1
7356                                         restrict = []
7357                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7358                                         "fetch" in restrict:
7359                                         fetch = red("F")
7360                                         if ordered:
7361                                                 counters.restrict_fetch += 1
7362                                         if portdb.fetch_check(pkg_key, pkg_use):
7363                                                 fetch = green("f")
7364                                                 if ordered:
7365                                                         counters.restrict_fetch_satisfied += 1
7366
7367                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7368                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
7369                                 myoldbest = []
7370                                 myinslotlist = None
7371                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7372                                 if vardb.cpv_exists(pkg_key):
7373                                         addl="  "+yellow("R")+fetch+"  "
7374                                         if ordered:
7375                                                 if pkg_merge:
7376                                                         counters.reinst += 1
7377                                                 elif pkg_status == "uninstall":
7378                                                         counters.uninst += 1
7379                                 # filter out old-style virtual matches
7380                                 elif installed_versions and \
7381                                         portage.cpv_getkey(installed_versions[0]) == \
7382                                         portage.cpv_getkey(pkg_key):
7383                                         myinslotlist = vardb.match(pkg.slot_atom)
7384                                         # If this is the first install of a new-style virtual, we
7385                                         # need to filter out old-style virtual matches.
7386                                         if myinslotlist and \
7387                                                 portage.cpv_getkey(myinslotlist[0]) != \
7388                                                 portage.cpv_getkey(pkg_key):
7389                                                 myinslotlist = None
7390                                         if myinslotlist:
7391                                                 myoldbest = myinslotlist[:]
7392                                                 addl = "   " + fetch
7393                                                 if not portage.dep.cpvequal(pkg_key,
7394                                                         portage.best([pkg_key] + myoldbest)):
7395                                                         # Downgrade in slot
7396                                                         addl += turquoise("U")+blue("D")
7397                                                         if ordered:
7398                                                                 counters.downgrades += 1
7399                                                 else:
7400                                                         # Update in slot
7401                                                         addl += turquoise("U") + " "
7402                                                         if ordered:
7403                                                                 counters.upgrades += 1
7404                                         else:
7405                                                 # New slot, mark it new.
7406                                                 addl = " " + green("NS") + fetch + "  "
7407                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7408                                                 if ordered:
7409                                                         counters.newslot += 1
7410
7411                                         if "--changelog" in self.myopts:
7412                                                 inst_matches = vardb.match(pkg.slot_atom)
7413                                                 if inst_matches:
7414                                                         changelogs.extend(self.calc_changelog(
7415                                                                 portdb.findname(pkg_key),
7416                                                                 inst_matches[0], pkg_key))
7417                                 else:
7418                                         addl = " " + green("N") + " " + fetch + "  "
7419                                         if ordered:
7420                                                 counters.new += 1
7421
7422                                 verboseadd = ""
7423                                 repoadd = None
7424
7425                                 if True:
7426                                         # USE flag display
7427                                         forced_flags = set()
7428                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7429                                         forced_flags.update(pkgsettings.useforce)
7430                                         forced_flags.update(pkgsettings.usemask)
7431
7432                                         cur_use = [flag for flag in pkg.use.enabled \
7433                                                 if flag in pkg.iuse.all]
7434                                         cur_iuse = sorted(pkg.iuse.all)
7435
7436                                         if myoldbest and myinslotlist:
7437                                                 previous_cpv = myoldbest[0]
7438                                         else:
7439                                                 previous_cpv = pkg.cpv
7440                                         if vardb.cpv_exists(previous_cpv):
7441                                                 old_iuse, old_use = vardb.aux_get(
7442                                                                 previous_cpv, ["IUSE", "USE"])
7443                                                 old_iuse = list(set(
7444                                                         filter_iuse_defaults(old_iuse.split())))
7445                                                 old_iuse.sort()
7446                                                 old_use = old_use.split()
7447                                                 is_new = False
7448                                         else:
7449                                                 old_iuse = []
7450                                                 old_use = []
7451                                                 is_new = True
7452
7453                                         old_use = [flag for flag in old_use if flag in old_iuse]
7454
7455                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
7456                                         use_expand.sort()
7457                                         use_expand.reverse()
7458                                         use_expand_hidden = \
7459                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7460
7461                                         def map_to_use_expand(myvals, forcedFlags=False,
7462                                                 removeHidden=True):
7463                                                 ret = {}
7464                                                 forced = {}
7465                                                 for exp in use_expand:
7466                                                         ret[exp] = []
7467                                                         forced[exp] = set()
7468                                                         for val in myvals[:]:
7469                                                                 if val.startswith(exp.lower()+"_"):
7470                                                                         if val in forced_flags:
7471                                                                                 forced[exp].add(val[len(exp)+1:])
7472                                                                         ret[exp].append(val[len(exp)+1:])
7473                                                                         myvals.remove(val)
7474                                                 ret["USE"] = myvals
7475                                                 forced["USE"] = [val for val in myvals \
7476                                                         if val in forced_flags]
7477                                                 if removeHidden:
7478                                                         for exp in use_expand_hidden:
7479                                                                 ret.pop(exp, None)
7480                                                 if forcedFlags:
7481                                                         return ret, forced
7482                                                 return ret
7483
7484                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7485                                         # are the only thing that triggered reinstallation.
7486                                         reinst_flags_map = {}
7487                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
7488                                         reinst_expand_map = None
7489                                         if reinstall_for_flags:
7490                                                 reinst_flags_map = map_to_use_expand(
7491                                                         list(reinstall_for_flags), removeHidden=False)
7492                                                 for k in list(reinst_flags_map):
7493                                                         if not reinst_flags_map[k]:
7494                                                                 del reinst_flags_map[k]
7495                                                 if not reinst_flags_map.get("USE"):
7496                                                         reinst_expand_map = reinst_flags_map.copy()
7497                                                         reinst_expand_map.pop("USE", None)
7498                                         if reinst_expand_map and \
7499                                                 not set(reinst_expand_map).difference(
7500                                                 use_expand_hidden):
7501                                                 use_expand_hidden = \
7502                                                         set(use_expand_hidden).difference(
7503                                                         reinst_expand_map)
7504
7505                                         cur_iuse_map, iuse_forced = \
7506                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
7507                                         cur_use_map = map_to_use_expand(cur_use)
7508                                         old_iuse_map = map_to_use_expand(old_iuse)
7509                                         old_use_map = map_to_use_expand(old_use)
7510
7511                                         use_expand.sort()
7512                                         use_expand.insert(0, "USE")
7513                                         
7514                                         for key in use_expand:
7515                                                 if key in use_expand_hidden:
7516                                                         continue
7517                                                 verboseadd += create_use_string(key.upper(),
7518                                                         cur_iuse_map[key], iuse_forced[key],
7519                                                         cur_use_map[key], old_iuse_map[key],
7520                                                         old_use_map[key], is_new,
7521                                                         reinst_flags_map.get(key))
7522
7523                                 if verbosity == 3:
7524                                         # size verbose
7525                                         mysize=0
7526                                         if pkg_type == "ebuild" and pkg_merge:
7527                                                 try:
7528                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
7529                                                                 useflags=pkg_use, debug=self.edebug)
7530                                                 except portage.exception.InvalidDependString, e:
7531                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7532                                                         show_invalid_depstring_notice(x, src_uri, str(e))
7533                                                         del e
7534                                                         return 1
7535                                                 if myfilesdict is None:
7536                                                         myfilesdict="[empty/missing/bad digest]"
7537                                                 else:
7538                                                         for myfetchfile in myfilesdict:
7539                                                                 if myfetchfile not in myfetchlist:
7540                                                                         mysize+=myfilesdict[myfetchfile]
7541                                                                         myfetchlist.append(myfetchfile)
7542                                                         if ordered:
7543                                                                 counters.totalsize += mysize
7544                                                 verboseadd += format_size(mysize)
7545
7546                                         # overlay verbose
7547                                         # assign index for a previous version in the same slot
7548                                         has_previous = False
7549                                         repo_name_prev = None
7550                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7551                                                 metadata["SLOT"])
7552                                         slot_matches = vardb.match(slot_atom)
7553                                         if slot_matches:
7554                                                 has_previous = True
7555                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
7556                                                         ["repository"])[0]
7557
7558                                         # now use the data to generate output
7559                                         if pkg.installed or not has_previous:
7560                                                 repoadd = repo_display.repoStr(repo_path_real)
7561                                         else:
7562                                                 repo_path_prev = None
7563                                                 if repo_name_prev:
7564                                                         repo_path_prev = portdb.getRepositoryPath(
7565                                                                 repo_name_prev)
7566                                                 if repo_path_prev == repo_path_real:
7567                                                         repoadd = repo_display.repoStr(repo_path_real)
7568                                                 else:
7569                                                         repoadd = "%s=>%s" % (
7570                                                                 repo_display.repoStr(repo_path_prev),
7571                                                                 repo_display.repoStr(repo_path_real))
7572                                         if repoadd:
7573                                                 repoadd_set.add(repoadd)
7574
7575                                 xs = [portage.cpv_getkey(pkg_key)] + \
7576                                         list(portage.catpkgsplit(pkg_key)[2:])
7577                                 if xs[2] == "r0":
7578                                         xs[2] = ""
7579                                 else:
7580                                         xs[2] = "-" + xs[2]
7581
7582                                 mywidth = 130
7583                                 if "COLUMNWIDTH" in self.settings:
7584                                         try:
7585                                                 mywidth = int(self.settings["COLUMNWIDTH"])
7586                                         except ValueError, e:
7587                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7588                                                 portage.writemsg(
7589                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7590                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
7591                                                 del e
7592                                 oldlp = mywidth - 30
7593                                 newlp = oldlp - 30
7594
7595                                 # Convert myoldbest from a list to a string.
7596                                 if not myoldbest:
7597                                         myoldbest = ""
7598                                 else:
7599                                         for pos, key in enumerate(myoldbest):
7600                                                 key = portage.catpkgsplit(key)[2] + \
7601                                                         "-" + portage.catpkgsplit(key)[3]
7602                                                 if key[-3:] == "-r0":
7603                                                         key = key[:-3]
7604                                                 myoldbest[pos] = key
7605                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
7606
7607                                 pkg_cp = xs[0]
7608                                 root_config = self.roots[myroot]
7609                                 system_set = root_config.sets["system"]
7610                                 world_set  = root_config.sets["world"]
7611
7612                                 pkg_system = False
7613                                 pkg_world = False
7614                                 try:
7615                                         pkg_system = system_set.findAtomForPackage(pkg)
7616                                         pkg_world  = world_set.findAtomForPackage(pkg)
7617                                         if not (oneshot or pkg_world) and \
7618                                                 myroot == self.target_root and \
7619                                                 favorites_set.findAtomForPackage(pkg):
7620                                                 # Maybe it will be added to world now.
7621                                                 if create_world_atom(pkg, favorites_set, root_config):
7622                                                         pkg_world = True
7623                                 except portage.exception.InvalidDependString:
7624                                         # This is reported elsewhere if relevant.
7625                                         pass
7626
7627                                 def pkgprint(pkg_str):
7628                                         if pkg_merge:
7629                                                 if pkg_system:
7630                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
7631                                                 elif pkg_world:
7632                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
7633                                                 else:
7634                                                         return colorize("PKG_MERGE", pkg_str)
7635                                         elif pkg_status == "uninstall":
7636                                                 return colorize("PKG_UNINSTALL", pkg_str)
7637                                         else:
7638                                                 if pkg_system:
7639                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7640                                                 elif pkg_world:
7641                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
7642                                                 else:
7643                                                         return colorize("PKG_NOMERGE", pkg_str)
7644
7645                                 try:
7646                                         properties = flatten(use_reduce(paren_reduce(
7647                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7648                                 except portage.exception.InvalidDependString, e:
7649                                         if not pkg.installed:
7650                                                 show_invalid_depstring_notice(pkg,
7651                                                         pkg.metadata["PROPERTIES"], str(e))
7652                                                 del e
7653                                                 return 1
7654                                         properties = []
7655                                 interactive = "interactive" in properties
7656                                 if interactive and pkg.operation == "merge":
7657                                         addl = colorize("WARN", "I") + addl[1:]
7658                                         if ordered:
7659                                                 counters.interactive += 1
7660
7661                                 if x[1]!="/":
7662                                         if myoldbest:
7663                                                 myoldbest +=" "
7664                                         if "--columns" in self.myopts:
7665                                                 if "--quiet" in self.myopts:
7666                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7667                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7668                                                         myprint=myprint+myoldbest
7669                                                         myprint=myprint+darkgreen("to "+x[1])
7670                                                         verboseadd = None
7671                                                 else:
7672                                                         if not pkg_merge:
7673                                                                 myprint = "[%s] %s%s" % \
7674                                                                         (pkgprint(pkg_status.ljust(13)),
7675                                                                         indent, pkgprint(pkg.cp))
7676                                                         else:
7677                                                                 myprint = "[%s %s] %s%s" % \
7678                                                                         (pkgprint(pkg.type_name), addl,
7679                                                                         indent, pkgprint(pkg.cp))
7680                                                         if (newlp-nc_len(myprint)) > 0:
7681                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7682                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7683                                                         if (oldlp-nc_len(myprint)) > 0:
7684                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
7685                                                         myprint=myprint+myoldbest
7686                                                         myprint += darkgreen("to " + pkg.root)
7687                                         else:
7688                                                 if not pkg_merge:
7689                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7690                                                 else:
7691                                                         myprint = "[" + pkg_type + " " + addl + "] "
7692                                                 myprint += indent + pkgprint(pkg_key) + " " + \
7693                                                         myoldbest + darkgreen("to " + myroot)
7694                                 else:
7695                                         if "--columns" in self.myopts:
7696                                                 if "--quiet" in self.myopts:
7697                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7698                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
7699                                                         myprint=myprint+myoldbest
7700                                                         verboseadd = None
7701                                                 else:
7702                                                         if not pkg_merge:
7703                                                                 myprint = "[%s] %s%s" % \
7704                                                                         (pkgprint(pkg_status.ljust(13)),
7705                                                                         indent, pkgprint(pkg.cp))
7706                                                         else:
7707                                                                 myprint = "[%s %s] %s%s" % \
7708                                                                         (pkgprint(pkg.type_name), addl,
7709                                                                         indent, pkgprint(pkg.cp))
7710                                                         if (newlp-nc_len(myprint)) > 0:
7711                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7712                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7713                                                         if (oldlp-nc_len(myprint)) > 0:
7714                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7715                                                         myprint += myoldbest
7716                                         else:
7717                                                 if not pkg_merge:
7718                                                         myprint = "[%s] %s%s %s" % \
7719                                                                 (pkgprint(pkg_status.ljust(13)),
7720                                                                 indent, pkgprint(pkg.cpv),
7721                                                                 myoldbest)
7722                                                 else:
7723                                                         myprint = "[%s %s] %s%s %s" % \
7724                                                                 (pkgprint(pkg_type), addl, indent,
7725                                                                 pkgprint(pkg.cpv), myoldbest)
7726
7727                                 if columns and pkg.operation == "uninstall":
7728                                         continue
7729                                 p.append((myprint, verboseadd, repoadd))
7730
7731                                 if "--tree" not in self.myopts and \
7732                                         "--quiet" not in self.myopts and \
7733                                         not self._opts_no_restart.intersection(self.myopts) and \
7734                                         pkg.root == self._running_root.root and \
7735                                         portage.match_from_list(
7736                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7737                                         not vardb.cpv_exists(pkg.cpv) and \
7738                                         "--quiet" not in self.myopts:
7739                                                 if mylist_index < len(mylist) - 1:
7740                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7741                                                         p.append(colorize("WARN", "    then resume the merge."))
7742
7743                 out = sys.stdout
7744                 show_repos = repoadd_set and repoadd_set != set(["0"])
7745
7746                 for x in p:
7747                         if isinstance(x, basestring):
7748                                 out.write("%s\n" % (x,))
7749                                 continue
7750
7751                         myprint, verboseadd, repoadd = x
7752
7753                         if verboseadd:
7754                                 myprint += " " + verboseadd
7755
7756                         if show_repos and repoadd:
7757                                 myprint += " " + teal("[%s]" % repoadd)
7758
7759                         out.write("%s\n" % (myprint,))
7760
7761                 for x in blockers:
7762                         print x
7763
7764                 if verbosity == 3:
7765                         print
7766                         print counters
7767                         if show_repos:
7768                                 sys.stdout.write(str(repo_display))
7769
7770                 if "--changelog" in self.myopts:
7771                         print
7772                         for revision,text in changelogs:
7773                                 print bold('*'+revision)
7774                                 sys.stdout.write(text)
7775
7776                 sys.stdout.flush()
7777                 return os.EX_OK
7778
7779         def display_problems(self):
7780                 """
7781                 Display problems with the dependency graph such as slot collisions.
7782                 This is called internally by display() to show the problems _after_
7783                 the merge list where it is most likely to be seen, but if display()
7784                 is not going to be called then this method should be called explicitly
7785                 to ensure that the user is notified of problems with the graph.
7786
7787                 All output goes to stderr, except for unsatisfied dependencies which
7788                 go to stdout for parsing by programs such as autounmask.
7789                 """
7790
7791                 # Note that show_masked_packages() sends it's output to
7792                 # stdout, and some programs such as autounmask parse the
7793                 # output in cases when emerge bails out. However, when
7794                 # show_masked_packages() is called for installed packages
7795                 # here, the message is a warning that is more appropriate
7796                 # to send to stderr, so temporarily redirect stdout to
7797                 # stderr. TODO: Fix output code so there's a cleaner way
7798                 # to redirect everything to stderr.
7799                 sys.stdout.flush()
7800                 sys.stderr.flush()
7801                 stdout = sys.stdout
7802                 try:
7803                         sys.stdout = sys.stderr
7804                         self._display_problems()
7805                 finally:
7806                         sys.stdout = stdout
7807                         sys.stdout.flush()
7808                         sys.stderr.flush()
7809
7810                 # This goes to stdout for parsing by programs like autounmask.
7811                 for pargs, kwargs in self._unsatisfied_deps_for_display:
7812                         self._show_unsatisfied_dep(*pargs, **kwargs)
7813
7814         def _display_problems(self):
7815                 if self._circular_deps_for_display is not None:
7816                         self._show_circular_deps(
7817                                 self._circular_deps_for_display)
7818
7819                 # The user is only notified of a slot conflict if
7820                 # there are no unresolvable blocker conflicts.
7821                 if self._unsatisfied_blockers_for_display is not None:
7822                         self._show_unsatisfied_blockers(
7823                                 self._unsatisfied_blockers_for_display)
7824                 else:
7825                         self._show_slot_collision_notice()
7826
7827                 # TODO: Add generic support for "set problem" handlers so that
7828                 # the below warnings aren't special cases for world only.
7829
7830                 if self._missing_args:
7831                         world_problems = False
7832                         if "world" in self._sets:
7833                                 # Filter out indirect members of world (from nested sets)
7834                                 # since only direct members of world are desired here.
7835                                 world_set = self.roots[self.target_root].sets["world"]
7836                                 for arg, atom in self._missing_args:
7837                                         if arg.name == "world" and atom in world_set:
7838                                                 world_problems = True
7839                                                 break
7840
7841                         if world_problems:
7842                                 sys.stderr.write("\n!!! Problems have been " + \
7843                                         "detected with your world file\n")
7844                                 sys.stderr.write("!!! Please run " + \
7845                                         green("emaint --check world")+"\n\n")
7846
7847                 if self._missing_args:
7848                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7849                                 " Ebuilds for the following packages are either all\n")
7850                         sys.stderr.write(colorize("BAD", "!!!") + \
7851                                 " masked or don't exist:\n")
7852                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
7853                                 self._missing_args) + "\n")
7854
7855                 if self._pprovided_args:
7856                         arg_refs = {}
7857                         for arg, atom in self._pprovided_args:
7858                                 if isinstance(arg, SetArg):
7859                                         parent = arg.name
7860                                         arg_atom = (atom, atom)
7861                                 else:
7862                                         parent = "args"
7863                                         arg_atom = (arg.arg, atom)
7864                                 refs = arg_refs.setdefault(arg_atom, [])
7865                                 if parent not in refs:
7866                                         refs.append(parent)
7867                         msg = []
7868                         msg.append(bad("\nWARNING: "))
7869                         if len(self._pprovided_args) > 1:
7870                                 msg.append("Requested packages will not be " + \
7871                                         "merged because they are listed in\n")
7872                         else:
7873                                 msg.append("A requested package will not be " + \
7874                                         "merged because it is listed in\n")
7875                         msg.append("package.provided:\n\n")
7876                         problems_sets = set()
7877                         for (arg, atom), refs in arg_refs.iteritems():
7878                                 ref_string = ""
7879                                 if refs:
7880                                         problems_sets.update(refs)
7881                                         refs.sort()
7882                                         ref_string = ", ".join(["'%s'" % name for name in refs])
7883                                         ref_string = " pulled in by " + ref_string
7884                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7885                         msg.append("\n")
7886                         if "world" in problems_sets:
7887                                 msg.append("This problem can be solved in one of the following ways:\n\n")
7888                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
7889                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
7890                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
7891                                 msg.append("The best course of action depends on the reason that an offending\n")
7892                                 msg.append("package.provided entry exists.\n\n")
7893                         sys.stderr.write("".join(msg))
7894
7895                 masked_packages = []
7896                 for pkg in self._masked_installed:
7897                         root_config = pkg.root_config
7898                         pkgsettings = self.pkgsettings[pkg.root]
7899                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
7900                         masked_packages.append((root_config, pkgsettings,
7901                                 pkg.cpv, pkg.metadata, mreasons))
7902                 if masked_packages:
7903                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7904                                 " The following installed packages are masked:\n")
7905                         show_masked_packages(masked_packages)
7906                         show_mask_docs()
7907                         print
7908
7909         def calc_changelog(self,ebuildpath,current,next):
7910                 if ebuildpath == None or not os.path.exists(ebuildpath):
7911                         return []
7912                 current = '-'.join(portage.catpkgsplit(current)[1:])
7913                 if current.endswith('-r0'):
7914                         current = current[:-3]
7915                 next = '-'.join(portage.catpkgsplit(next)[1:])
7916                 if next.endswith('-r0'):
7917                         next = next[:-3]
7918                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
7919                 try:
7920                         changelog = open(changelogpath).read()
7921                 except SystemExit, e:
7922                         raise # Needed else can't exit
7923                 except:
7924                         return []
7925                 divisions = self.find_changelog_tags(changelog)
7926                 #print 'XX from',current,'to',next
7927                 #for div,text in divisions: print 'XX',div
7928                 # skip entries for all revisions above the one we are about to emerge
7929                 for i in range(len(divisions)):
7930                         if divisions[i][0]==next:
7931                                 divisions = divisions[i:]
7932                                 break
7933                 # find out how many entries we are going to display
7934                 for i in range(len(divisions)):
7935                         if divisions[i][0]==current:
7936                                 divisions = divisions[:i]
7937                                 break
7938                 else:
7939                     # couldnt find the current revision in the list. display nothing
7940                         return []
7941                 return divisions
7942
7943         def find_changelog_tags(self,changelog):
7944                 divs = []
7945                 release = None
7946                 while 1:
7947                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
7948                         if match is None:
7949                                 if release is not None:
7950                                         divs.append((release,changelog))
7951                                 return divs
7952                         if release is not None:
7953                                 divs.append((release,changelog[:match.start()]))
7954                         changelog = changelog[match.end():]
7955                         release = match.group(1)
7956                         if release.endswith('.ebuild'):
7957                                 release = release[:-7]
7958                         if release.endswith('-r0'):
7959                                 release = release[:-3]
7960
7961         def saveNomergeFavorites(self):
7962                 """Find atoms in favorites that are not in the mergelist and add them
7963                 to the world file if necessary."""
7964                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7965                         "--oneshot", "--onlydeps", "--pretend"):
7966                         if x in self.myopts:
7967                                 return
7968                 root_config = self.roots[self.target_root]
7969                 world_set = root_config.sets["world"]
7970
7971                 world_locked = False
7972                 if hasattr(world_set, "lock"):
7973                         world_set.lock()
7974                         world_locked = True
7975
7976                 if hasattr(world_set, "load"):
7977                         world_set.load() # maybe it's changed on disk
7978
7979                 args_set = self._sets["args"]
7980                 portdb = self.trees[self.target_root]["porttree"].dbapi
7981                 added_favorites = set()
7982                 for x in self._set_nodes:
7983                         pkg_type, root, pkg_key, pkg_status = x
7984                         if pkg_status != "nomerge":
7985                                 continue
7986
7987                         try:
7988                                 myfavkey = create_world_atom(x, args_set, root_config)
7989                                 if myfavkey:
7990                                         if myfavkey in added_favorites:
7991                                                 continue
7992                                         added_favorites.add(myfavkey)
7993                         except portage.exception.InvalidDependString, e:
7994                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7995                                         (pkg_key, str(e)), noiselevel=-1)
7996                                 writemsg("!!! see '%s'\n\n" % os.path.join(
7997                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
7998                                 del e
7999                 all_added = []
8000                 for k in self._sets:
8001                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8002                                 continue
8003                         s = SETPREFIX + k
8004                         if s in world_set:
8005                                 continue
8006                         all_added.append(SETPREFIX + k)
8007                 all_added.extend(added_favorites)
8008                 all_added.sort()
8009                 for a in all_added:
8010                         print ">>> Recording %s in \"world\" favorites file..." % \
8011                                 colorize("INFORM", str(a))
8012                 if all_added:
8013                         world_set.update(all_added)
8014
8015                 if world_locked:
8016                         world_set.unlock()
8017
8018         def loadResumeCommand(self, resume_data, skip_masked=False):
8019                 """
8020                 Add a resume command to the graph and validate it in the process.  This
8021                 will raise a PackageNotFound exception if a package is not available.
8022                 """
8023
8024                 if not isinstance(resume_data, dict):
8025                         return False
8026
8027                 mergelist = resume_data.get("mergelist")
8028                 if not isinstance(mergelist, list):
8029                         mergelist = []
8030
8031                 fakedb = self.mydbapi
8032                 trees = self.trees
8033                 serialized_tasks = []
8034                 masked_tasks = []
8035                 for x in mergelist:
8036                         if not (isinstance(x, list) and len(x) == 4):
8037                                 continue
8038                         pkg_type, myroot, pkg_key, action = x
8039                         if pkg_type not in self.pkg_tree_map:
8040                                 continue
8041                         if action != "merge":
8042                                 continue
8043                         tree_type = self.pkg_tree_map[pkg_type]
8044                         mydb = trees[myroot][tree_type].dbapi
8045                         db_keys = list(self._trees_orig[myroot][
8046                                 tree_type].dbapi._aux_cache_keys)
8047                         try:
8048                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8049                         except KeyError:
8050                                 # It does no exist or it is corrupt.
8051                                 if action == "uninstall":
8052                                         continue
8053                                 raise portage.exception.PackageNotFound(pkg_key)
8054                         installed = action == "uninstall"
8055                         built = pkg_type != "ebuild"
8056                         root_config = self.roots[myroot]
8057                         pkg = Package(built=built, cpv=pkg_key,
8058                                 installed=installed, metadata=metadata,
8059                                 operation=action, root_config=root_config,
8060                                 type_name=pkg_type)
8061                         if pkg_type == "ebuild":
8062                                 pkgsettings = self.pkgsettings[myroot]
8063                                 pkgsettings.setcpv(pkg)
8064                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8065                         self._pkg_cache[pkg] = pkg
8066
8067                         root_config = self.roots[pkg.root]
8068                         if "merge" == pkg.operation and \
8069                                 not visible(root_config.settings, pkg):
8070                                 if skip_masked:
8071                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8072                                 else:
8073                                         self._unsatisfied_deps_for_display.append(
8074                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8075
8076                         fakedb[myroot].cpv_inject(pkg)
8077                         serialized_tasks.append(pkg)
8078                         self.spinner.update()
8079
8080                 if self._unsatisfied_deps_for_display:
8081                         return False
8082
8083                 if not serialized_tasks or "--nodeps" in self.myopts:
8084                         self._serialized_tasks_cache = serialized_tasks
8085                         self._scheduler_graph = self.digraph
8086                 else:
8087                         self._select_package = self._select_pkg_from_graph
8088                         self.myparams.add("selective")
8089
8090                         favorites = resume_data.get("favorites")
8091                         args_set = self._sets["args"]
8092                         if isinstance(favorites, list):
8093                                 args = self._load_favorites(favorites)
8094                         else:
8095                                 args = []
8096
8097                         for task in serialized_tasks:
8098                                 if isinstance(task, Package) and \
8099                                         task.operation == "merge":
8100                                         if not self._add_pkg(task, None):
8101                                                 return False
8102
8103                         # Packages for argument atoms need to be explicitly
8104                         # added via _add_pkg() so that they are included in the
8105                         # digraph (needed at least for --tree display).
8106                         for arg in args:
8107                                 for atom in arg.set:
8108                                         pkg, existing_node = self._select_package(
8109                                                 arg.root_config.root, atom)
8110                                         if existing_node is None and \
8111                                                 pkg is not None:
8112                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8113                                                         root=pkg.root, parent=arg)):
8114                                                         return False
8115
8116                         # Allow unsatisfied deps here to avoid showing a masking
8117                         # message for an unsatisfied dep that isn't necessarily
8118                         # masked.
8119                         if not self._create_graph(allow_unsatisfied=True):
8120                                 return False
8121                         if masked_tasks or self._unsatisfied_deps:
8122                                 # This probably means that a required package
8123                                 # was dropped via --skipfirst. It makes the
8124                                 # resume list invalid, so convert it to a
8125                                 # UnsatisfiedResumeDep exception.
8126                                 raise self.UnsatisfiedResumeDep(self,
8127                                         masked_tasks + self._unsatisfied_deps)
8128                         self._serialized_tasks_cache = None
8129                         try:
8130                                 self.altlist()
8131                         except self._unknown_internal_error:
8132                                 return False
8133
8134                 return True
8135
8136         def _load_favorites(self, favorites):
8137                 """
8138                 Use a list of favorites to resume state from a
8139                 previous select_files() call. This creates similar
8140                 DependencyArg instances to those that would have
8141                 been created by the original select_files() call.
8142                 This allows Package instances to be matched with
8143                 DependencyArg instances during graph creation.
8144                 """
8145                 root_config = self.roots[self.target_root]
8146                 getSetAtoms = root_config.setconfig.getSetAtoms
8147                 sets = root_config.sets
8148                 args = []
8149                 for x in favorites:
8150                         if not isinstance(x, basestring):
8151                                 continue
8152                         if x in ("system", "world"):
8153                                 x = SETPREFIX + x
8154                         if x.startswith(SETPREFIX):
8155                                 s = x[len(SETPREFIX):]
8156                                 if s not in sets:
8157                                         continue
8158                                 if s in self._sets:
8159                                         continue
8160                                 # Recursively expand sets so that containment tests in
8161                                 # self._get_parent_sets() properly match atoms in nested
8162                                 # sets (like if world contains system).
8163                                 expanded_set = InternalPackageSet(
8164                                         initial_atoms=getSetAtoms(s))
8165                                 self._sets[s] = expanded_set
8166                                 args.append(SetArg(arg=x, set=expanded_set,
8167                                         root_config=root_config))
8168                         else:
8169                                 if not portage.isvalidatom(x):
8170                                         continue
8171                                 args.append(AtomArg(arg=x, atom=x,
8172                                         root_config=root_config))
8173
8174                 # Create the "args" package set from atoms and
8175                 # packages given as arguments.
8176                 args_set = self._sets["args"]
8177                 for arg in args:
8178                         if not isinstance(arg, (AtomArg, PackageArg)):
8179                                 continue
8180                         myatom = arg.atom
8181                         if myatom in args_set:
8182                                 continue
8183                         args_set.add(myatom)
8184                 self._set_atoms.update(chain(*self._sets.itervalues()))
8185                 atom_arg_map = self._atom_arg_map
8186                 for arg in args:
8187                         for atom in arg.set:
8188                                 atom_key = (atom, arg.root_config.root)
8189                                 refs = atom_arg_map.get(atom_key)
8190                                 if refs is None:
8191                                         refs = []
8192                                         atom_arg_map[atom_key] = refs
8193                                         if arg not in refs:
8194                                                 refs.append(arg)
8195                 return args
8196
8197         class UnsatisfiedResumeDep(portage.exception.PortageException):
8198                 """
8199                 A dependency of a resume list is not installed. This
8200                 can occur when a required package is dropped from the
8201                 merge list via --skipfirst.
8202                 """
8203                 def __init__(self, depgraph, value):
8204                         portage.exception.PortageException.__init__(self, value)
8205                         self.depgraph = depgraph
8206
8207         class _internal_exception(portage.exception.PortageException):
8208                 def __init__(self, value=""):
8209                         portage.exception.PortageException.__init__(self, value)
8210
8211         class _unknown_internal_error(_internal_exception):
8212                 """
8213                 Used by the depgraph internally to terminate graph creation.
8214                 The specific reason for the failure should have been dumped
8215                 to stderr, unfortunately, the exact reason for the failure
8216                 may not be known.
8217                 """
8218
8219         class _serialize_tasks_retry(_internal_exception):
8220                 """
8221                 This is raised by the _serialize_tasks() method when it needs to
8222                 be called again for some reason. The only case that it's currently
8223                 used for is when neglected dependencies need to be added to the
8224                 graph in order to avoid making a potentially unsafe decision.
8225                 """
8226
8227         class _dep_check_composite_db(portage.dbapi):
8228                 """
8229                 A dbapi-like interface that is optimized for use in dep_check() calls.
8230                 This is built on top of the existing depgraph package selection logic.
8231                 Some packages that have been added to the graph may be masked from this
8232                 view in order to influence the atom preference selection that occurs
8233                 via dep_check().
8234                 """
8235                 def __init__(self, depgraph, root):
8236                         portage.dbapi.__init__(self)
8237                         self._depgraph = depgraph
8238                         self._root = root
8239                         self._match_cache = {}
8240                         self._cpv_pkg_map = {}
8241
8242                 def match(self, atom):
8243                         ret = self._match_cache.get(atom)
8244                         if ret is not None:
8245                                 return ret[:]
8246                         orig_atom = atom
8247                         if "/" not in atom:
8248                                 atom = self._dep_expand(atom)
8249                         pkg, existing = self._depgraph._select_package(self._root, atom)
8250                         if not pkg:
8251                                 ret = []
8252                         else:
8253                                 # Return the highest available from select_package() as well as
8254                                 # any matching slots in the graph db.
8255                                 slots = set()
8256                                 slots.add(pkg.metadata["SLOT"])
8257                                 atom_cp = portage.dep_getkey(atom)
8258                                 if pkg.cp.startswith("virtual/"):
8259                                         # For new-style virtual lookahead that occurs inside
8260                                         # dep_check(), examine all slots. This is needed
8261                                         # so that newer slots will not unnecessarily be pulled in
8262                                         # when a satisfying lower slot is already installed. For
8263                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8264                                         # there's no need to pull in a newer slot to satisfy a
8265                                         # virtual/jdk dependency.
8266                                         for db, pkg_type, built, installed, db_keys in \
8267                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8268                                                 for cpv in db.match(atom):
8269                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8270                                                                 continue
8271                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8272                                 ret = []
8273                                 if self._visible(pkg):
8274                                         self._cpv_pkg_map[pkg.cpv] = pkg
8275                                         ret.append(pkg.cpv)
8276                                 slots.remove(pkg.metadata["SLOT"])
8277                                 while slots:
8278                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8279                                         pkg, existing = self._depgraph._select_package(
8280                                                 self._root, slot_atom)
8281                                         if not pkg:
8282                                                 continue
8283                                         if not self._visible(pkg):
8284                                                 continue
8285                                         self._cpv_pkg_map[pkg.cpv] = pkg
8286                                         ret.append(pkg.cpv)
8287                                 if ret:
8288                                         self._cpv_sort_ascending(ret)
8289                         self._match_cache[orig_atom] = ret
8290                         return ret[:]
8291
8292                 def _visible(self, pkg):
8293                         if pkg.installed and "selective" not in self._depgraph.myparams:
8294                                 try:
8295                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8296                                 except (StopIteration, portage.exception.InvalidDependString):
8297                                         arg = None
8298                                 if arg:
8299                                         return False
8300                         if pkg.installed:
8301                                 try:
8302                                         if not visible(
8303                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8304                                                 return False
8305                                 except portage.exception.InvalidDependString:
8306                                         pass
8307                         return True
8308
8309                 def _dep_expand(self, atom):
8310                         """
8311                         This is only needed for old installed packages that may
8312                         contain atoms that are not fully qualified with a specific
8313                         category. Emulate the cpv_expand() function that's used by
8314                         dbapi.match() in cases like this. If there are multiple
8315                         matches, it's often due to a new-style virtual that has
8316                         been added, so try to filter those out to avoid raising
8317                         a ValueError.
8318                         """
8319                         root_config = self._depgraph.roots[self._root]
8320                         orig_atom = atom
8321                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8322                         if len(expanded_atoms) > 1:
8323                                 non_virtual_atoms = []
8324                                 for x in expanded_atoms:
8325                                         if not portage.dep_getkey(x).startswith("virtual/"):
8326                                                 non_virtual_atoms.append(x)
8327                                 if len(non_virtual_atoms) == 1:
8328                                         expanded_atoms = non_virtual_atoms
8329                         if len(expanded_atoms) > 1:
8330                                 # compatible with portage.cpv_expand()
8331                                 raise portage.exception.AmbiguousPackageName(
8332                                         [portage.dep_getkey(x) for x in expanded_atoms])
8333                         if expanded_atoms:
8334                                 atom = expanded_atoms[0]
8335                         else:
8336                                 null_atom = insert_category_into_atom(atom, "null")
8337                                 null_cp = portage.dep_getkey(null_atom)
8338                                 cat, atom_pn = portage.catsplit(null_cp)
8339                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8340                                 if virts_p:
8341                                         # Allow the resolver to choose which virtual.
8342                                         atom = insert_category_into_atom(atom, "virtual")
8343                                 else:
8344                                         atom = insert_category_into_atom(atom, "null")
8345                         return atom
8346
8347                 def aux_get(self, cpv, wants):
8348                         metadata = self._cpv_pkg_map[cpv].metadata
8349                         return [metadata.get(x, "") for x in wants]
8350
8351         class _package_cache(dict):
8352                 def __init__(self, depgraph):
8353                         dict.__init__(self)
8354                         self._depgraph = depgraph
8355
8356                 def __setitem__(self, k, v):
8357                         dict.__setitem__(self, k, v)
8358                         root_config = self._depgraph.roots[v.root]
8359                         try:
8360                                 if visible(root_config.settings, v) and \
8361                                         not (v.installed and \
8362                                         v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
8363                                         root_config.visible_pkgs.cpv_inject(v)
8364                         except portage.exception.InvalidDependString:
8365                                 pass
8366
8367 class RepoDisplay(object):
8368         def __init__(self, roots):
8369                 self._shown_repos = {}
8370                 self._unknown_repo = False
8371                 repo_paths = set()
8372                 for root_config in roots.itervalues():
8373                         portdir = root_config.settings.get("PORTDIR")
8374                         if portdir:
8375                                 repo_paths.add(portdir)
8376                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
8377                         if overlays:
8378                                 repo_paths.update(overlays.split())
8379                 repo_paths = list(repo_paths)
8380                 self._repo_paths = repo_paths
8381                 self._repo_paths_real = [ os.path.realpath(repo_path) \
8382                         for repo_path in repo_paths ]
8383
8384                 # pre-allocate index for PORTDIR so that it always has index 0.
8385                 for root_config in roots.itervalues():
8386                         portdb = root_config.trees["porttree"].dbapi
8387                         portdir = portdb.porttree_root
8388                         if portdir:
8389                                 self.repoStr(portdir)
8390
8391         def repoStr(self, repo_path_real):
8392                 real_index = -1
8393                 if repo_path_real:
8394                         real_index = self._repo_paths_real.index(repo_path_real)
8395                 if real_index == -1:
8396                         s = "?"
8397                         self._unknown_repo = True
8398                 else:
8399                         shown_repos = self._shown_repos
8400                         repo_paths = self._repo_paths
8401                         repo_path = repo_paths[real_index]
8402                         index = shown_repos.get(repo_path)
8403                         if index is None:
8404                                 index = len(shown_repos)
8405                                 shown_repos[repo_path] = index
8406                         s = str(index)
8407                 return s
8408
8409         def __str__(self):
8410                 output = []
8411                 shown_repos = self._shown_repos
8412                 unknown_repo = self._unknown_repo
8413                 if shown_repos or self._unknown_repo:
8414                         output.append("Portage tree and overlays:\n")
8415                 show_repo_paths = list(shown_repos)
8416                 for repo_path, repo_index in shown_repos.iteritems():
8417                         show_repo_paths[repo_index] = repo_path
8418                 if show_repo_paths:
8419                         for index, repo_path in enumerate(show_repo_paths):
8420                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8421                 if unknown_repo:
8422                         output.append(" "+teal("[?]") + \
8423                                 " indicates that the source repository could not be determined\n")
8424                 return "".join(output)
8425
8426 class PackageCounters(object):
8427
8428         def __init__(self):
8429                 self.upgrades   = 0
8430                 self.downgrades = 0
8431                 self.new        = 0
8432                 self.newslot    = 0
8433                 self.reinst     = 0
8434                 self.uninst     = 0
8435                 self.blocks     = 0
8436                 self.blocks_satisfied         = 0
8437                 self.totalsize  = 0
8438                 self.restrict_fetch           = 0
8439                 self.restrict_fetch_satisfied = 0
8440                 self.interactive              = 0
8441
8442         def __str__(self):
8443                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8444                 myoutput = []
8445                 details = []
8446                 myoutput.append("Total: %s package" % total_installs)
8447                 if total_installs != 1:
8448                         myoutput.append("s")
8449                 if total_installs != 0:
8450                         myoutput.append(" (")
8451                 if self.upgrades > 0:
8452                         details.append("%s upgrade" % self.upgrades)
8453                         if self.upgrades > 1:
8454                                 details[-1] += "s"
8455                 if self.downgrades > 0:
8456                         details.append("%s downgrade" % self.downgrades)
8457                         if self.downgrades > 1:
8458                                 details[-1] += "s"
8459                 if self.new > 0:
8460                         details.append("%s new" % self.new)
8461                 if self.newslot > 0:
8462                         details.append("%s in new slot" % self.newslot)
8463                         if self.newslot > 1:
8464                                 details[-1] += "s"
8465                 if self.reinst > 0:
8466                         details.append("%s reinstall" % self.reinst)
8467                         if self.reinst > 1:
8468                                 details[-1] += "s"
8469                 if self.uninst > 0:
8470                         details.append("%s uninstall" % self.uninst)
8471                         if self.uninst > 1:
8472                                 details[-1] += "s"
8473                 if self.interactive > 0:
8474                         details.append("%s %s" % (self.interactive,
8475                                 colorize("WARN", "interactive")))
8476                 myoutput.append(", ".join(details))
8477                 if total_installs != 0:
8478                         myoutput.append(")")
8479                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8480                 if self.restrict_fetch:
8481                         myoutput.append("\nFetch Restriction: %s package" % \
8482                                 self.restrict_fetch)
8483                         if self.restrict_fetch > 1:
8484                                 myoutput.append("s")
8485                 if self.restrict_fetch_satisfied < self.restrict_fetch:
8486                         myoutput.append(bad(" (%s unsatisfied)") % \
8487                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
8488                 if self.blocks > 0:
8489                         myoutput.append("\nConflict: %s block" % \
8490                                 self.blocks)
8491                         if self.blocks > 1:
8492                                 myoutput.append("s")
8493                         if self.blocks_satisfied < self.blocks:
8494                                 myoutput.append(bad(" (%s unsatisfied)") % \
8495                                         (self.blocks - self.blocks_satisfied))
8496                 return "".join(myoutput)
8497
8498 class PollConstants(object):
8499
8500         """
8501         Provides POLL* constants that are equivalent to those from the
8502         select module, for use by PollSelectAdapter.
8503         """
8504
8505         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
8506         v = 1
8507         for k in names:
8508                 locals()[k] = getattr(select, k, v)
8509                 v *= 2
8510         del k, v
8511
8512 class PollSelectAdapter(PollConstants):
8513
8514         """
8515         Use select to emulate a poll object, for
8516         systems that don't support poll().
8517         """
8518
8519         def __init__(self):
8520                 self._registered = {}
8521                 self._select_args = [[], [], []]
8522
8523         def register(self, fd, *args):
8524                 """
8525                 Only POLLIN is currently supported!
8526                 """
8527                 if len(args) > 1:
8528                         raise TypeError(
8529                                 "register expected at most 2 arguments, got " + \
8530                                 repr(1 + len(args)))
8531
8532                 eventmask = PollConstants.POLLIN | \
8533                         PollConstants.POLLPRI | PollConstants.POLLOUT
8534                 if args:
8535                         eventmask = args[0]
8536
8537                 self._registered[fd] = eventmask
8538                 self._select_args = None
8539
8540         def unregister(self, fd):
8541                 self._select_args = None
8542                 del self._registered[fd]
8543
8544         def poll(self, *args):
8545                 if len(args) > 1:
8546                         raise TypeError(
8547                                 "poll expected at most 2 arguments, got " + \
8548                                 repr(1 + len(args)))
8549
8550                 timeout = None
8551                 if args:
8552                         timeout = args[0]
8553
8554                 select_args = self._select_args
8555                 if select_args is None:
8556                         select_args = [self._registered.keys(), [], []]
8557
8558                 if timeout is not None:
8559                         select_args = select_args[:]
8560                         # Translate poll() timeout args to select() timeout args:
8561                         #
8562                         #          | units        | value(s) for indefinite block
8563                         # ---------|--------------|------------------------------
8564                         #   poll   | milliseconds | omitted, negative, or None
8565                         # ---------|--------------|------------------------------
8566                         #   select | seconds      | omitted
8567                         # ---------|--------------|------------------------------
8568
8569                         if timeout is not None and timeout < 0:
8570                                 timeout = None
8571                         if timeout is not None:
8572                                 select_args.append(timeout / 1000)
8573
8574                 select_events = select.select(*select_args)
8575                 poll_events = []
8576                 for fd in select_events[0]:
8577                         poll_events.append((fd, PollConstants.POLLIN))
8578                 return poll_events
8579
8580 class SequentialTaskQueue(SlotObject):
8581
8582         __slots__ = ("max_jobs", "running_tasks") + \
8583                 ("_dirty", "_scheduling", "_task_queue")
8584
8585         def __init__(self, **kwargs):
8586                 SlotObject.__init__(self, **kwargs)
8587                 self._task_queue = deque()
8588                 self.running_tasks = set()
8589                 if self.max_jobs is None:
8590                         self.max_jobs = 1
8591                 self._dirty = True
8592
8593         def add(self, task):
8594                 self._task_queue.append(task)
8595                 self._dirty = True
8596
8597         def addFront(self, task):
8598                 self._task_queue.appendleft(task)
8599                 self._dirty = True
8600
8601         def schedule(self):
8602
8603                 if not self._dirty:
8604                         return False
8605
8606                 if not self:
8607                         return False
8608
8609                 if self._scheduling:
8610                         # Ignore any recursive schedule() calls triggered via
8611                         # self._task_exit().
8612                         return False
8613
8614                 self._scheduling = True
8615
8616                 task_queue = self._task_queue
8617                 running_tasks = self.running_tasks
8618                 max_jobs = self.max_jobs
8619                 state_changed = False
8620
8621                 while task_queue and \
8622                         (max_jobs is True or len(running_tasks) < max_jobs):
8623                         task = task_queue.popleft()
8624                         cancelled = getattr(task, "cancelled", None)
8625                         if not cancelled:
8626                                 running_tasks.add(task)
8627                                 task.addExitListener(self._task_exit)
8628                                 task.start()
8629                         state_changed = True
8630
8631                 self._dirty = False
8632                 self._scheduling = False
8633
8634                 return state_changed
8635
8636         def _task_exit(self, task):
8637                 """
8638                 Since we can always rely on exit listeners being called, the set of
8639                 running tasks is always pruned automatically and there is never any need
8640                 to actively prune it.
8641                 """
8642                 self.running_tasks.remove(task)
8643                 if self._task_queue:
8644                         self._dirty = True
8645
8646         def clear(self):
8647                 self._task_queue.clear()
8648                 running_tasks = self.running_tasks
8649                 while running_tasks:
8650                         task = running_tasks.pop()
8651                         task.removeExitListener(self._task_exit)
8652                         task.cancel()
8653                 self._dirty = False
8654
8655         def __nonzero__(self):
8656                 return bool(self._task_queue or self.running_tasks)
8657
8658         def __len__(self):
8659                 return len(self._task_queue) + len(self.running_tasks)
8660
8661 _can_poll_device = None
8662
8663 def can_poll_device():
8664         """
8665         Test if it's possible to use poll() on a device such as a pty. This
8666         is known to fail on Darwin.
8667         @rtype: bool
8668         @returns: True if poll() on a device succeeds, False otherwise.
8669         """
8670
8671         global _can_poll_device
8672         if _can_poll_device is not None:
8673                 return _can_poll_device
8674
8675         if not hasattr(select, "poll"):
8676                 _can_poll_device = False
8677                 return _can_poll_device
8678
8679         try:
8680                 dev_null = open('/dev/null', 'rb')
8681         except IOError:
8682                 _can_poll_device = False
8683                 return _can_poll_device
8684
8685         p = select.poll()
8686         p.register(dev_null.fileno(), PollConstants.POLLIN)
8687
8688         invalid_request = False
8689         for f, event in p.poll():
8690                 if event & PollConstants.POLLNVAL:
8691                         invalid_request = True
8692                         break
8693         dev_null.close()
8694
8695         _can_poll_device = not invalid_request
8696         return _can_poll_device
8697
8698 def create_poll_instance():
8699         """
8700         Create an instance of select.poll, or an instance of
8701         PollSelectAdapter there is no poll() implementation or
8702         it is broken somehow.
8703         """
8704         if can_poll_device():
8705                 return select.poll()
8706         return PollSelectAdapter()
8707
8708 class PollScheduler(object):
8709
8710         class _sched_iface_class(SlotObject):
8711                 __slots__ = ("register", "schedule", "unregister")
8712
8713         def __init__(self):
8714                 self._max_jobs = 1
8715                 self._max_load = None
8716                 self._jobs = 0
8717                 self._poll_event_queue = []
8718                 self._poll_event_handlers = {}
8719                 self._poll_event_handler_ids = {}
8720                 # Increment id for each new handler.
8721                 self._event_handler_id = 0
8722                 self._poll_obj = create_poll_instance()
8723                 self._scheduling = False
8724
8725         def _schedule(self):
8726                 """
8727                 Calls _schedule_tasks() and automatically returns early from
8728                 any recursive calls to this method that the _schedule_tasks()
8729                 call might trigger. This makes _schedule() safe to call from
8730                 inside exit listeners.
8731                 """
8732                 if self._scheduling:
8733                         return False
8734                 self._scheduling = True
8735                 try:
8736                         return self._schedule_tasks()
8737                 finally:
8738                         self._scheduling = False
8739
8740         def _running_job_count(self):
8741                 return self._jobs
8742
8743         def _can_add_job(self):
8744                 max_jobs = self._max_jobs
8745                 max_load = self._max_load
8746
8747                 if self._max_jobs is not True and \
8748                         self._running_job_count() >= self._max_jobs:
8749                         return False
8750
8751                 if max_load is not None and \
8752                         (max_jobs is True or max_jobs > 1) and \
8753                         self._running_job_count() >= 1:
8754                         try:
8755                                 avg1, avg5, avg15 = os.getloadavg()
8756                         except (AttributeError, OSError), e:
8757                                 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8758                                         noiselevel=-1)
8759                                 del e
8760                                 return False
8761
8762                         if avg1 >= max_load:
8763                                 return False
8764
8765                 return True
8766
8767         def _poll(self, timeout=None):
8768                 """
8769                 All poll() calls pass through here. The poll events
8770                 are added directly to self._poll_event_queue.
8771                 In order to avoid endless blocking, this raises
8772                 StopIteration if timeout is None and there are
8773                 no file descriptors to poll.
8774                 """
8775                 if not self._poll_event_handlers:
8776                         self._schedule()
8777                         if timeout is None and \
8778                                 not self._poll_event_handlers:
8779                                 raise StopIteration(
8780                                         "timeout is None and there are no poll() event handlers")
8781
8782                 # The following error is known to occur with Linux kernel versions
8783                 # less than 2.6.24:
8784                 #
8785                 #   select.error: (4, 'Interrupted system call')
8786                 #
8787                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
8788                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
8789                 # without any events.
8790                 while True:
8791                         try:
8792                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
8793                                 break
8794                         except select.error, e:
8795                                 writemsg_level("\n!!! select error: %s\n" % (e,),
8796                                         level=logging.ERROR, noiselevel=-1)
8797                                 del e
8798                                 if timeout is not None:
8799                                         break
8800
8801         def _next_poll_event(self, timeout=None):
8802                 """
8803                 Since the _schedule_wait() loop is called by event
8804                 handlers from _poll_loop(), maintain a central event
8805                 queue for both of them to share events from a single
8806                 poll() call. In order to avoid endless blocking, this
8807                 raises StopIteration if timeout is None and there are
8808                 no file descriptors to poll.
8809                 """
8810                 if not self._poll_event_queue:
8811                         self._poll(timeout)
8812                 return self._poll_event_queue.pop()
8813
8814         def _poll_loop(self):
8815
8816                 event_handlers = self._poll_event_handlers
8817                 event_handled = False
8818
8819                 try:
8820                         while event_handlers:
8821                                 f, event = self._next_poll_event()
8822                                 handler, reg_id = event_handlers[f]
8823                                 handler(f, event)
8824                                 event_handled = True
8825                 except StopIteration:
8826                         event_handled = True
8827
8828                 if not event_handled:
8829                         raise AssertionError("tight loop")
8830
8831         def _schedule_yield(self):
8832                 """
8833                 Schedule for a short period of time chosen by the scheduler based
8834                 on internal state. Synchronous tasks should call this periodically
8835                 in order to allow the scheduler to service pending poll events. The
8836                 scheduler will call poll() exactly once, without blocking, and any
8837                 resulting poll events will be serviced.
8838                 """
8839                 event_handlers = self._poll_event_handlers
8840                 events_handled = 0
8841
8842                 if not event_handlers:
8843                         return bool(events_handled)
8844
8845                 if not self._poll_event_queue:
8846                         self._poll(0)
8847
8848                 try:
8849                         while event_handlers and self._poll_event_queue:
8850                                 f, event = self._next_poll_event()
8851                                 handler, reg_id = event_handlers[f]
8852                                 handler(f, event)
8853                                 events_handled += 1
8854                 except StopIteration:
8855                         events_handled += 1
8856
8857                 return bool(events_handled)
8858
8859         def _register(self, f, eventmask, handler):
8860                 """
8861                 @rtype: Integer
8862                 @return: A unique registration id, for use in schedule() or
8863                         unregister() calls.
8864                 """
8865                 if f in self._poll_event_handlers:
8866                         raise AssertionError("fd %d is already registered" % f)
8867                 self._event_handler_id += 1
8868                 reg_id = self._event_handler_id
8869                 self._poll_event_handler_ids[reg_id] = f
8870                 self._poll_event_handlers[f] = (handler, reg_id)
8871                 self._poll_obj.register(f, eventmask)
8872                 return reg_id
8873
8874         def _unregister(self, reg_id):
8875                 f = self._poll_event_handler_ids[reg_id]
8876                 self._poll_obj.unregister(f)
8877                 del self._poll_event_handlers[f]
8878                 del self._poll_event_handler_ids[reg_id]
8879
8880         def _schedule_wait(self, wait_ids):
8881                 """
8882                 Schedule until wait_id is not longer registered
8883                 for poll() events.
8884                 @type wait_id: int
8885                 @param wait_id: a task id to wait for
8886                 """
8887                 event_handlers = self._poll_event_handlers
8888                 handler_ids = self._poll_event_handler_ids
8889                 event_handled = False
8890
8891                 if isinstance(wait_ids, int):
8892                         wait_ids = frozenset([wait_ids])
8893
8894                 try:
8895                         while wait_ids.intersection(handler_ids):
8896                                 f, event = self._next_poll_event()
8897                                 handler, reg_id = event_handlers[f]
8898                                 handler(f, event)
8899                                 event_handled = True
8900                 except StopIteration:
8901                         event_handled = True
8902
8903                 return event_handled
8904
8905 class QueueScheduler(PollScheduler):
8906
8907         """
8908         Add instances of SequentialTaskQueue and then call run(). The
8909         run() method returns when no tasks remain.
8910         """
8911
8912         def __init__(self, max_jobs=None, max_load=None):
8913                 PollScheduler.__init__(self)
8914
8915                 if max_jobs is None:
8916                         max_jobs = 1
8917
8918                 self._max_jobs = max_jobs
8919                 self._max_load = max_load
8920                 self.sched_iface = self._sched_iface_class(
8921                         register=self._register,
8922                         schedule=self._schedule_wait,
8923                         unregister=self._unregister)
8924
8925                 self._queues = []
8926                 self._schedule_listeners = []
8927
8928         def add(self, q):
8929                 self._queues.append(q)
8930
8931         def remove(self, q):
8932                 self._queues.remove(q)
8933
8934         def run(self):
8935
8936                 while self._schedule():
8937                         self._poll_loop()
8938
8939                 while self._running_job_count():
8940                         self._poll_loop()
8941
8942         def _schedule_tasks(self):
8943                 """
8944                 @rtype: bool
8945                 @returns: True if there may be remaining tasks to schedule,
8946                         False otherwise.
8947                 """
8948                 while self._can_add_job():
8949                         n = self._max_jobs - self._running_job_count()
8950                         if n < 1:
8951                                 break
8952
8953                         if not self._start_next_job(n):
8954                                 return False
8955
8956                 for q in self._queues:
8957                         if q:
8958                                 return True
8959                 return False
8960
8961         def _running_job_count(self):
8962                 job_count = 0
8963                 for q in self._queues:
8964                         job_count += len(q.running_tasks)
8965                 self._jobs = job_count
8966                 return job_count
8967
8968         def _start_next_job(self, n=1):
8969                 started_count = 0
8970                 for q in self._queues:
8971                         initial_job_count = len(q.running_tasks)
8972                         q.schedule()
8973                         final_job_count = len(q.running_tasks)
8974                         if final_job_count > initial_job_count:
8975                                 started_count += (final_job_count - initial_job_count)
8976                         if started_count >= n:
8977                                 break
8978                 return started_count
8979
8980 class TaskScheduler(object):
8981
8982         """
8983         A simple way to handle scheduling of AsynchrousTask instances. Simply
8984         add tasks and call run(). The run() method returns when no tasks remain.
8985         """
8986
8987         def __init__(self, max_jobs=None, max_load=None):
8988                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
8989                 self._scheduler = QueueScheduler(
8990                         max_jobs=max_jobs, max_load=max_load)
8991                 self.sched_iface = self._scheduler.sched_iface
8992                 self.run = self._scheduler.run
8993                 self._scheduler.add(self._queue)
8994
8995         def add(self, task):
8996                 self._queue.add(task)
8997
8998         def run(self):
8999                 self._scheduler.schedule()
9000
9001 class JobStatusDisplay(object):
9002
9003         _bound_properties = ("curval", "failed", "running")
9004         _jobs_column_width = 48
9005
9006         # Don't update the display unless at least this much
9007         # time has passed, in units of seconds.
9008         _min_display_latency = 2
9009
9010         _default_term_codes = {
9011                 'cr'  : '\r',
9012                 'el'  : '\x1b[K',
9013                 'nel' : '\n',
9014         }
9015
9016         _termcap_name_map = {
9017                 'carriage_return' : 'cr',
9018                 'clr_eol'         : 'el',
9019                 'newline'         : 'nel',
9020         }
9021
9022         def __init__(self, out=sys.stdout, quiet=False):
9023                 object.__setattr__(self, "out", out)
9024                 object.__setattr__(self, "quiet", quiet)
9025                 object.__setattr__(self, "maxval", 0)
9026                 object.__setattr__(self, "merges", 0)
9027                 object.__setattr__(self, "_changed", False)
9028                 object.__setattr__(self, "_displayed", False)
9029                 object.__setattr__(self, "_last_display_time", 0)
9030                 object.__setattr__(self, "width", 80)
9031                 self.reset()
9032
9033                 isatty = hasattr(out, "isatty") and out.isatty()
9034                 object.__setattr__(self, "_isatty", isatty)
9035                 if not isatty or not self._init_term():
9036                         term_codes = {}
9037                         for k, capname in self._termcap_name_map.iteritems():
9038                                 term_codes[k] = self._default_term_codes[capname]
9039                         object.__setattr__(self, "_term_codes", term_codes)
9040
9041         def _init_term(self):
9042                 """
9043                 Initialize term control codes.
9044                 @rtype: bool
9045                 @returns: True if term codes were successfully initialized,
9046                         False otherwise.
9047                 """
9048
9049                 term_type = os.environ.get("TERM", "vt100")
9050                 tigetstr = None
9051
9052                 try:
9053                         import curses
9054                         try:
9055                                 curses.setupterm(term_type, self.out.fileno())
9056                                 tigetstr = curses.tigetstr
9057                         except curses.error:
9058                                 pass
9059                 except ImportError:
9060                         pass
9061
9062                 if tigetstr is None:
9063                         return False
9064
9065                 term_codes = {}
9066                 for k, capname in self._termcap_name_map.iteritems():
9067                         code = tigetstr(capname)
9068                         if code is None:
9069                                 code = self._default_term_codes[capname]
9070                         term_codes[k] = code
9071                 object.__setattr__(self, "_term_codes", term_codes)
9072                 return True
9073
9074         def _format_msg(self, msg):
9075                 return ">>> %s" % msg
9076
9077         def _erase(self):
9078                 self.out.write(
9079                         self._term_codes['carriage_return'] + \
9080                         self._term_codes['clr_eol'])
9081                 self.out.flush()
9082                 self._displayed = False
9083
9084         def _display(self, line):
9085                 self.out.write(line)
9086                 self.out.flush()
9087                 self._displayed = True
9088
9089         def _update(self, msg):
9090
9091                 out = self.out
9092                 if not self._isatty:
9093                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9094                         self.out.flush()
9095                         self._displayed = True
9096                         return
9097
9098                 if self._displayed:
9099                         self._erase()
9100
9101                 self._display(self._format_msg(msg))
9102
9103         def displayMessage(self, msg):
9104
9105                 was_displayed = self._displayed
9106
9107                 if self._isatty and self._displayed:
9108                         self._erase()
9109
9110                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9111                 self.out.flush()
9112                 self._displayed = False
9113
9114                 if was_displayed:
9115                         self._changed = True
9116                         self.display()
9117
9118         def reset(self):
9119                 self.maxval = 0
9120                 self.merges = 0
9121                 for name in self._bound_properties:
9122                         object.__setattr__(self, name, 0)
9123
9124                 if self._displayed:
9125                         self.out.write(self._term_codes['newline'])
9126                         self.out.flush()
9127                         self._displayed = False
9128
9129         def __setattr__(self, name, value):
9130                 old_value = getattr(self, name)
9131                 if value == old_value:
9132                         return
9133                 object.__setattr__(self, name, value)
9134                 if name in self._bound_properties:
9135                         self._property_change(name, old_value, value)
9136
9137         def _property_change(self, name, old_value, new_value):
9138                 self._changed = True
9139                 self.display()
9140
9141         def _load_avg_str(self):
9142                 try:
9143                         avg = os.getloadavg()
9144                 except (AttributeError, OSError), e:
9145                         return str(e)
9146
9147                 max_avg = max(avg)
9148
9149                 if max_avg < 10:
9150                         digits = 2
9151                 elif max_avg < 100:
9152                         digits = 1
9153                 else:
9154                         digits = 0
9155
9156                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9157
9158         def display(self):
9159                 """
9160                 Display status on stdout, but only if something has
9161                 changed since the last call.
9162                 """
9163
9164                 if self.quiet:
9165                         return
9166
9167                 current_time = time.time()
9168                 time_delta = current_time - self._last_display_time
9169                 if self._displayed and \
9170                         not self._changed:
9171                         if not self._isatty:
9172                                 return
9173                         if time_delta < self._min_display_latency:
9174                                 return
9175
9176                 self._last_display_time = current_time
9177                 self._changed = False
9178                 self._display_status()
9179
9180         def _display_status(self):
9181                 # Don't use len(self._completed_tasks) here since that also
9182                 # can include uninstall tasks.
9183                 curval_str = str(self.curval)
9184                 maxval_str = str(self.maxval)
9185                 running_str = str(self.running)
9186                 failed_str = str(self.failed)
9187                 load_avg_str = self._load_avg_str()
9188
9189                 color_output = StringIO.StringIO()
9190                 plain_output = StringIO.StringIO()
9191                 style_file = portage.output.ConsoleStyleFile(color_output)
9192                 style_file.write_listener = plain_output
9193                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9194                 style_writer.style_listener = style_file.new_styles
9195                 f = formatter.AbstractFormatter(style_writer)
9196
9197                 number_style = "INFORM"
9198                 f.add_literal_data("Jobs: ")
9199                 f.push_style(number_style)
9200                 f.add_literal_data(curval_str)
9201                 f.pop_style()
9202                 f.add_literal_data(" of ")
9203                 f.push_style(number_style)
9204                 f.add_literal_data(maxval_str)
9205                 f.pop_style()
9206                 f.add_literal_data(" complete")
9207
9208                 if self.running:
9209                         f.add_literal_data(", ")
9210                         f.push_style(number_style)
9211                         f.add_literal_data(running_str)
9212                         f.pop_style()
9213                         f.add_literal_data(" running")
9214
9215                 if self.failed:
9216                         f.add_literal_data(", ")
9217                         f.push_style(number_style)
9218                         f.add_literal_data(failed_str)
9219                         f.pop_style()
9220                         f.add_literal_data(" failed")
9221
9222                 padding = self._jobs_column_width - len(plain_output.getvalue())
9223                 if padding > 0:
9224                         f.add_literal_data(padding * " ")
9225
9226                 f.add_literal_data("Load avg: ")
9227                 f.add_literal_data(load_avg_str)
9228
9229                 # Truncate to fit width, to avoid making the terminal scroll if the
9230                 # line overflows (happens when the load average is large).
9231                 plain_output = plain_output.getvalue()
9232                 if self._isatty and len(plain_output) > self.width:
9233                         # Use plain_output here since it's easier to truncate
9234                         # properly than the color output which contains console
9235                         # color codes.
9236                         self._update(plain_output[:self.width])
9237                 else:
9238                         self._update(color_output.getvalue())
9239
9240                 xtermTitle(" ".join(plain_output.split()))
9241
9242 class Scheduler(PollScheduler):
9243
9244         _opts_ignore_blockers = \
9245                 frozenset(["--buildpkgonly",
9246                 "--fetchonly", "--fetch-all-uri",
9247                 "--nodeps", "--pretend"])
9248
9249         _opts_no_background = \
9250                 frozenset(["--pretend",
9251                 "--fetchonly", "--fetch-all-uri"])
9252
9253         _opts_no_restart = frozenset(["--buildpkgonly",
9254                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9255
9256         _bad_resume_opts = set(["--ask", "--changelog",
9257                 "--resume", "--skipfirst"])
9258
9259         _fetch_log = "/var/log/emerge-fetch.log"
9260
9261         class _iface_class(SlotObject):
9262                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9263                         "dblinkElog", "fetch", "register", "schedule",
9264                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9265                         "unregister")
9266
9267         class _fetch_iface_class(SlotObject):
9268                 __slots__ = ("log_file", "schedule")
9269
9270         _task_queues_class = slot_dict_class(
9271                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9272
9273         class _build_opts_class(SlotObject):
9274                 __slots__ = ("buildpkg", "buildpkgonly",
9275                         "fetch_all_uri", "fetchonly", "pretend")
9276
9277         class _binpkg_opts_class(SlotObject):
9278                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9279
9280         class _pkg_count_class(SlotObject):
9281                 __slots__ = ("curval", "maxval")
9282
9283         class _emerge_log_class(SlotObject):
9284                 __slots__ = ("xterm_titles",)
9285
9286                 def log(self, *pargs, **kwargs):
9287                         if not self.xterm_titles:
9288                                 # Avoid interference with the scheduler's status display.
9289                                 kwargs.pop("short_msg", None)
9290                         emergelog(self.xterm_titles, *pargs, **kwargs)
9291
9292         class _failed_pkg(SlotObject):
9293                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9294
9295         class _ConfigPool(object):
9296                 """Interface for a task to temporarily allocate a config
9297                 instance from a pool. This allows a task to be constructed
9298                 long before the config instance actually becomes needed, like
9299                 when prefetchers are constructed for the whole merge list."""
9300                 __slots__ = ("_root", "_allocate", "_deallocate")
9301                 def __init__(self, root, allocate, deallocate):
9302                         self._root = root
9303                         self._allocate = allocate
9304                         self._deallocate = deallocate
9305                 def allocate(self):
9306                         return self._allocate(self._root)
9307                 def deallocate(self, settings):
9308                         self._deallocate(settings)
9309
9310         class _unknown_internal_error(portage.exception.PortageException):
9311                 """
9312                 Used internally to terminate scheduling. The specific reason for
9313                 the failure should have been dumped to stderr.
9314                 """
9315                 def __init__(self, value=""):
9316                         portage.exception.PortageException.__init__(self, value)
9317
9318         def __init__(self, settings, trees, mtimedb, myopts,
9319                 spinner, mergelist, favorites, digraph):
9320                 PollScheduler.__init__(self)
9321                 self.settings = settings
9322                 self.target_root = settings["ROOT"]
9323                 self.trees = trees
9324                 self.myopts = myopts
9325                 self._spinner = spinner
9326                 self._mtimedb = mtimedb
9327                 self._mergelist = mergelist
9328                 self._favorites = favorites
9329                 self._args_set = InternalPackageSet(favorites)
9330                 self._build_opts = self._build_opts_class()
9331                 for k in self._build_opts.__slots__:
9332                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9333                 self._binpkg_opts = self._binpkg_opts_class()
9334                 for k in self._binpkg_opts.__slots__:
9335                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9336
9337                 self.curval = 0
9338                 self._logger = self._emerge_log_class()
9339                 self._task_queues = self._task_queues_class()
9340                 for k in self._task_queues.allowed_keys:
9341                         setattr(self._task_queues, k,
9342                                 SequentialTaskQueue())
9343                 self._status_display = JobStatusDisplay()
9344                 self._max_load = myopts.get("--load-average")
9345                 max_jobs = myopts.get("--jobs")
9346                 if max_jobs is None:
9347                         max_jobs = 1
9348                 self._set_max_jobs(max_jobs)
9349
9350                 # The root where the currently running
9351                 # portage instance is installed.
9352                 self._running_root = trees["/"]["root_config"]
9353                 self.edebug = 0
9354                 if settings.get("PORTAGE_DEBUG", "") == "1":
9355                         self.edebug = 1
9356                 self.pkgsettings = {}
9357                 self._config_pool = {}
9358                 self._blocker_db = {}
9359                 for root in trees:
9360                         self._config_pool[root] = []
9361                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9362
9363                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9364                         schedule=self._schedule_fetch)
9365                 self._sched_iface = self._iface_class(
9366                         dblinkEbuildPhase=self._dblink_ebuild_phase,
9367                         dblinkDisplayMerge=self._dblink_display_merge,
9368                         dblinkElog=self._dblink_elog,
9369                         fetch=fetch_iface, register=self._register,
9370                         schedule=self._schedule_wait,
9371                         scheduleSetup=self._schedule_setup,
9372                         scheduleUnpack=self._schedule_unpack,
9373                         scheduleYield=self._schedule_yield,
9374                         unregister=self._unregister)
9375
9376                 self._prefetchers = weakref.WeakValueDictionary()
9377                 self._pkg_queue = []
9378                 self._completed_tasks = set()
9379
9380                 self._failed_pkgs = []
9381                 self._failed_pkgs_all = []
9382                 self._failed_pkgs_die_msgs = []
9383                 self._post_mod_echo_msgs = []
9384                 self._parallel_fetch = False
9385                 merge_count = len([x for x in mergelist \
9386                         if isinstance(x, Package) and x.operation == "merge"])
9387                 self._pkg_count = self._pkg_count_class(
9388                         curval=0, maxval=merge_count)
9389                 self._status_display.maxval = self._pkg_count.maxval
9390
9391                 # The load average takes some time to respond when new
9392                 # jobs are added, so we need to limit the rate of adding
9393                 # new jobs.
9394                 self._job_delay_max = 10
9395                 self._job_delay_factor = 1.0
9396                 self._job_delay_exp = 1.5
9397                 self._previous_job_start_time = None
9398
9399                 self._set_digraph(digraph)
9400
9401                 # This is used to memoize the _choose_pkg() result when
9402                 # no packages can be chosen until one of the existing
9403                 # jobs completes.
9404                 self._choose_pkg_return_early = False
9405
9406                 features = self.settings.features
9407                 if "parallel-fetch" in features and \
9408                         not ("--pretend" in self.myopts or \
9409                         "--fetch-all-uri" in self.myopts or \
9410                         "--fetchonly" in self.myopts):
9411                         if "distlocks" not in features:
9412                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9413                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
9414                                         "requires the distlocks feature enabled"+"\n",
9415                                         noiselevel=-1)
9416                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
9417                                         "thus parallel-fetching is being disabled"+"\n",
9418                                         noiselevel=-1)
9419                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9420                         elif len(mergelist) > 1:
9421                                 self._parallel_fetch = True
9422
9423                 if self._parallel_fetch:
9424                                 # clear out existing fetch log if it exists
9425                                 try:
9426                                         open(self._fetch_log, 'w')
9427                                 except EnvironmentError:
9428                                         pass
9429
9430                 self._running_portage = None
9431                 portage_match = self._running_root.trees["vartree"].dbapi.match(
9432                         portage.const.PORTAGE_PACKAGE_ATOM)
9433                 if portage_match:
9434                         cpv = portage_match.pop()
9435                         self._running_portage = self._pkg(cpv, "installed",
9436                                 self._running_root, installed=True)
9437
9438         def _poll(self, timeout=None):
9439                 self._schedule()
9440                 PollScheduler._poll(self, timeout=timeout)
9441
9442         def _set_max_jobs(self, max_jobs):
9443                 self._max_jobs = max_jobs
9444                 self._task_queues.jobs.max_jobs = max_jobs
9445
9446         def _background_mode(self):
9447                 """
9448                 Check if background mode is enabled and adjust states as necessary.
9449
9450                 @rtype: bool
9451                 @returns: True if background mode is enabled, False otherwise.
9452                 """
9453                 background = (self._max_jobs is True or \
9454                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
9455                         not bool(self._opts_no_background.intersection(self.myopts))
9456
9457                 if background:
9458                         interactive_tasks = self._get_interactive_tasks()
9459                         if interactive_tasks:
9460                                 background = False
9461                                 writemsg_level(">>> Sending package output to stdio due " + \
9462                                         "to interactive package(s):\n",
9463                                         level=logging.INFO, noiselevel=-1)
9464                                 msg = [""]
9465                                 for pkg in interactive_tasks:
9466                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
9467                                         if pkg.root != "/":
9468                                                 pkg_str += " for " + pkg.root
9469                                         msg.append(pkg_str)
9470                                 msg.append("")
9471                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
9472                                         level=logging.INFO, noiselevel=-1)
9473                                 if self._max_jobs is True or self._max_jobs > 1:
9474                                         self._set_max_jobs(1)
9475                                         writemsg_level(">>> Setting --jobs=1 due " + \
9476                                                 "to the above interactive package(s)\n",
9477                                                 level=logging.INFO, noiselevel=-1)
9478
9479                 self._status_display.quiet = \
9480                         not background or \
9481                         ("--quiet" in self.myopts and \
9482                         "--verbose" not in self.myopts)
9483
9484                 self._logger.xterm_titles = \
9485                         "notitles" not in self.settings.features and \
9486                         self._status_display.quiet
9487
9488                 return background
9489
9490         def _get_interactive_tasks(self):
9491                 from portage import flatten
9492                 from portage.dep import use_reduce, paren_reduce
9493                 interactive_tasks = []
9494                 for task in self._mergelist:
9495                         if not (isinstance(task, Package) and \
9496                                 task.operation == "merge"):
9497                                 continue
9498                         try:
9499                                 properties = flatten(use_reduce(paren_reduce(
9500                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9501                         except portage.exception.InvalidDependString, e:
9502                                 show_invalid_depstring_notice(task,
9503                                         task.metadata["PROPERTIES"], str(e))
9504                                 raise self._unknown_internal_error()
9505                         if "interactive" in properties:
9506                                 interactive_tasks.append(task)
9507                 return interactive_tasks
9508
9509         def _set_digraph(self, digraph):
9510                 if "--nodeps" in self.myopts or \
9511                         (self._max_jobs is not True and self._max_jobs < 2):
9512                         # save some memory
9513                         self._digraph = None
9514                         return
9515
9516                 self._digraph = digraph
9517                 self._prune_digraph()
9518
9519         def _prune_digraph(self):
9520                 """
9521                 Prune any root nodes that are irrelevant.
9522                 """
9523
9524                 graph = self._digraph
9525                 completed_tasks = self._completed_tasks
9526                 removed_nodes = set()
9527                 while True:
9528                         for node in graph.root_nodes():
9529                                 if not isinstance(node, Package) or \
9530                                         (node.installed and node.operation == "nomerge") or \
9531                                         node.onlydeps or \
9532                                         node in completed_tasks:
9533                                         removed_nodes.add(node)
9534                         if removed_nodes:
9535                                 graph.difference_update(removed_nodes)
9536                         if not removed_nodes:
9537                                 break
9538                         removed_nodes.clear()
9539
9540         class _pkg_failure(portage.exception.PortageException):
9541                 """
9542                 An instance of this class is raised by unmerge() when
9543                 an uninstallation fails.
9544                 """
9545                 status = 1
9546                 def __init__(self, *pargs):
9547                         portage.exception.PortageException.__init__(self, pargs)
9548                         if pargs:
9549                                 self.status = pargs[0]
9550
9551         def _schedule_fetch(self, fetcher):
9552                 """
9553                 Schedule a fetcher on the fetch queue, in order to
9554                 serialize access to the fetch log.
9555                 """
9556                 self._task_queues.fetch.addFront(fetcher)
9557
9558         def _schedule_setup(self, setup_phase):
9559                 """
9560                 Schedule a setup phase on the merge queue, in order to
9561                 serialize unsandboxed access to the live filesystem.
9562                 """
9563                 self._task_queues.merge.addFront(setup_phase)
9564                 self._schedule()
9565
9566         def _schedule_unpack(self, unpack_phase):
9567                 """
9568                 Schedule an unpack phase on the unpack queue, in order
9569                 to serialize $DISTDIR access for live ebuilds.
9570                 """
9571                 self._task_queues.unpack.add(unpack_phase)
9572
9573         def _find_blockers(self, new_pkg):
9574                 """
9575                 Returns a callable which should be called only when
9576                 the vdb lock has been acquired.
9577                 """
9578                 def get_blockers():
9579                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9580                 return get_blockers
9581
9582         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9583                 if self._opts_ignore_blockers.intersection(self.myopts):
9584                         return None
9585
9586                 # Call gc.collect() here to avoid heap overflow that
9587                 # triggers 'Cannot allocate memory' errors (reported
9588                 # with python-2.5).
9589                 import gc
9590                 gc.collect()
9591
9592                 blocker_db = self._blocker_db[new_pkg.root]
9593
9594                 blocker_dblinks = []
9595                 for blocking_pkg in blocker_db.findInstalledBlockers(
9596                         new_pkg, acquire_lock=acquire_lock):
9597                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
9598                                 continue
9599                         if new_pkg.cpv == blocking_pkg.cpv:
9600                                 continue
9601                         blocker_dblinks.append(portage.dblink(
9602                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9603                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9604                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
9605
9606                 gc.collect()
9607
9608                 return blocker_dblinks
9609
9610         def _dblink_pkg(self, pkg_dblink):
9611                 cpv = pkg_dblink.mycpv
9612                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9613                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9614                 installed = type_name == "installed"
9615                 return self._pkg(cpv, type_name, root_config, installed=installed)
9616
9617         def _append_to_log_path(self, log_path, msg):
9618                 f = open(log_path, 'a')
9619                 try:
9620                         f.write(msg)
9621                 finally:
9622                         f.close()
9623
9624         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9625
9626                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9627                 log_file = None
9628                 out = sys.stdout
9629                 background = self._background
9630
9631                 if background and log_path is not None:
9632                         log_file = open(log_path, 'a')
9633                         out = log_file
9634
9635                 try:
9636                         for msg in msgs:
9637                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9638                 finally:
9639                         if log_file is not None:
9640                                 log_file.close()
9641
9642         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9643                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9644                 background = self._background
9645
9646                 if log_path is None:
9647                         if not (background and level < logging.WARN):
9648                                 portage.util.writemsg_level(msg,
9649                                         level=level, noiselevel=noiselevel)
9650                 else:
9651                         if not background:
9652                                 portage.util.writemsg_level(msg,
9653                                         level=level, noiselevel=noiselevel)
9654                         self._append_to_log_path(log_path, msg)
9655
9656         def _dblink_ebuild_phase(self,
9657                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9658                 """
9659                 Using this callback for merge phases allows the scheduler
9660                 to run while these phases execute asynchronously, and allows
9661                 the scheduler control output handling.
9662                 """
9663
9664                 scheduler = self._sched_iface
9665                 settings = pkg_dblink.settings
9666                 pkg = self._dblink_pkg(pkg_dblink)
9667                 background = self._background
9668                 log_path = settings.get("PORTAGE_LOG_FILE")
9669
9670                 ebuild_phase = EbuildPhase(background=background,
9671                         pkg=pkg, phase=phase, scheduler=scheduler,
9672                         settings=settings, tree=pkg_dblink.treetype)
9673                 ebuild_phase.start()
9674                 ebuild_phase.wait()
9675
9676                 return ebuild_phase.returncode
9677
9678         def _check_manifests(self):
9679                 # Verify all the manifests now so that the user is notified of failure
9680                 # as soon as possible.
9681                 if "strict" not in self.settings.features or \
9682                         "--fetchonly" in self.myopts or \
9683                         "--fetch-all-uri" in self.myopts:
9684                         return os.EX_OK
9685
9686                 shown_verifying_msg = False
9687                 quiet_settings = {}
9688                 for myroot, pkgsettings in self.pkgsettings.iteritems():
9689                         quiet_config = portage.config(clone=pkgsettings)
9690                         quiet_config["PORTAGE_QUIET"] = "1"
9691                         quiet_config.backup_changes("PORTAGE_QUIET")
9692                         quiet_settings[myroot] = quiet_config
9693                         del quiet_config
9694
9695                 for x in self._mergelist:
9696                         if not isinstance(x, Package) or \
9697                                 x.type_name != "ebuild":
9698                                 continue
9699
9700                         if not shown_verifying_msg:
9701                                 shown_verifying_msg = True
9702                                 self._status_msg("Verifying ebuild manifests")
9703
9704                         root_config = x.root_config
9705                         portdb = root_config.trees["porttree"].dbapi
9706                         quiet_config = quiet_settings[root_config.root]
9707                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9708                         if not portage.digestcheck([], quiet_config, strict=True):
9709                                 return 1
9710
9711                 return os.EX_OK
9712
9713         def _add_prefetchers(self):
9714
9715                 if not self._parallel_fetch:
9716                         return
9717
9718                 if self._parallel_fetch:
9719                         self._status_msg("Starting parallel fetch")
9720
9721                         prefetchers = self._prefetchers
9722                         getbinpkg = "--getbinpkg" in self.myopts
9723
9724                         # In order to avoid "waiting for lock" messages
9725                         # at the beginning, which annoy users, never
9726                         # spawn a prefetcher for the first package.
9727                         for pkg in self._mergelist[1:]:
9728                                 prefetcher = self._create_prefetcher(pkg)
9729                                 if prefetcher is not None:
9730                                         self._task_queues.fetch.add(prefetcher)
9731                                         prefetchers[pkg] = prefetcher
9732
9733         def _create_prefetcher(self, pkg):
9734                 """
9735                 @return: a prefetcher, or None if not applicable
9736                 """
9737                 prefetcher = None
9738
9739                 if not isinstance(pkg, Package):
9740                         pass
9741
9742                 elif pkg.type_name == "ebuild":
9743
9744                         prefetcher = EbuildFetcher(background=True,
9745                                 config_pool=self._ConfigPool(pkg.root,
9746                                 self._allocate_config, self._deallocate_config),
9747                                 fetchonly=1, logfile=self._fetch_log,
9748                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9749
9750                 elif pkg.type_name == "binary" and \
9751                         "--getbinpkg" in self.myopts and \
9752                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9753
9754                         prefetcher = BinpkgFetcher(background=True,
9755                                 logfile=self._fetch_log, pkg=pkg,
9756                                 scheduler=self._sched_iface)
9757
9758                 return prefetcher
9759
9760         def _is_restart_scheduled(self):
9761                 """
9762                 Check if the merge list contains a replacement
9763                 for the current running instance, that will result
9764                 in restart after merge.
9765                 @rtype: bool
9766                 @returns: True if a restart is scheduled, False otherwise.
9767                 """
9768                 if self._opts_no_restart.intersection(self.myopts):
9769                         return False
9770
9771                 mergelist = self._mergelist
9772
9773                 for i, pkg in enumerate(mergelist):
9774                         if self._is_restart_necessary(pkg) and \
9775                                 i != len(mergelist) - 1:
9776                                 return True
9777
9778                 return False
9779
9780         def _is_restart_necessary(self, pkg):
9781                 """
9782                 @return: True if merging the given package
9783                         requires restart, False otherwise.
9784                 """
9785
9786                 # Figure out if we need a restart.
9787                 if pkg.root == self._running_root.root and \
9788                         portage.match_from_list(
9789                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9790                         if self._running_portage:
9791                                 return cmp(pkg, self._running_portage) != 0
9792                         return True
9793                 return False
9794
9795         def _restart_if_necessary(self, pkg):
9796                 """
9797                 Use execv() to restart emerge. This happens
9798                 if portage upgrades itself and there are
9799                 remaining packages in the list.
9800                 """
9801
9802                 if self._opts_no_restart.intersection(self.myopts):
9803                         return
9804
9805                 if not self._is_restart_necessary(pkg):
9806                         return
9807
9808                 if pkg == self._mergelist[-1]:
9809                         return
9810
9811                 self._main_loop_cleanup()
9812
9813                 logger = self._logger
9814                 pkg_count = self._pkg_count
9815                 mtimedb = self._mtimedb
9816                 bad_resume_opts = self._bad_resume_opts
9817
9818                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9819                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9820
9821                 logger.log(" *** RESTARTING " + \
9822                         "emerge via exec() after change of " + \
9823                         "portage version.")
9824
9825                 mtimedb["resume"]["mergelist"].remove(list(pkg))
9826                 mtimedb.commit()
9827                 portage.run_exitfuncs()
9828                 mynewargv = [sys.argv[0], "--resume"]
9829                 resume_opts = self.myopts.copy()
9830                 # For automatic resume, we need to prevent
9831                 # any of bad_resume_opts from leaking in
9832                 # via EMERGE_DEFAULT_OPTS.
9833                 resume_opts["--ignore-default-opts"] = True
9834                 for myopt, myarg in resume_opts.iteritems():
9835                         if myopt not in bad_resume_opts:
9836                                 if myarg is True:
9837                                         mynewargv.append(myopt)
9838                                 else:
9839                                         mynewargv.append(myopt +"="+ str(myarg))
9840                 # priority only needs to be adjusted on the first run
9841                 os.environ["PORTAGE_NICENESS"] = "0"
9842                 os.execv(mynewargv[0], mynewargv)
9843
9844         def merge(self):
9845
9846                 if "--resume" in self.myopts:
9847                         # We're resuming.
9848                         portage.writemsg_stdout(
9849                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9850                         self._logger.log(" *** Resuming merge...")
9851
9852                 self._save_resume_list()
9853
9854                 try:
9855                         self._background = self._background_mode()
9856                 except self._unknown_internal_error:
9857                         return 1
9858
9859                 for root in self.trees:
9860                         root_config = self.trees[root]["root_config"]
9861
9862                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
9863                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
9864                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
9865                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
9866                         if not tmpdir or not os.path.isdir(tmpdir):
9867                                 msg = "The directory specified in your " + \
9868                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
9869                                 "does not exist. Please create this " + \
9870                                 "directory or correct your PORTAGE_TMPDIR setting."
9871                                 msg = textwrap.wrap(msg, 70)
9872                                 out = portage.output.EOutput()
9873                                 for l in msg:
9874                                         out.eerror(l)
9875                                 return 1
9876
9877                         if self._background:
9878                                 root_config.settings.unlock()
9879                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
9880                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
9881                                 root_config.settings.lock()
9882
9883                         self.pkgsettings[root] = portage.config(
9884                                 clone=root_config.settings)
9885
9886                 rval = self._check_manifests()
9887                 if rval != os.EX_OK:
9888                         return rval
9889
9890                 keep_going = "--keep-going" in self.myopts
9891                 fetchonly = self._build_opts.fetchonly
9892                 mtimedb = self._mtimedb
9893                 failed_pkgs = self._failed_pkgs
9894
9895                 while True:
9896                         rval = self._merge()
9897                         if rval == os.EX_OK or fetchonly or not keep_going:
9898                                 break
9899                         if "resume" not in mtimedb:
9900                                 break
9901                         mergelist = self._mtimedb["resume"].get("mergelist")
9902                         if not mergelist:
9903                                 break
9904
9905                         if not failed_pkgs:
9906                                 break
9907
9908                         for failed_pkg in failed_pkgs:
9909                                 mergelist.remove(list(failed_pkg.pkg))
9910
9911                         self._failed_pkgs_all.extend(failed_pkgs)
9912                         del failed_pkgs[:]
9913
9914                         if not mergelist:
9915                                 break
9916
9917                         if not self._calc_resume_list():
9918                                 break
9919
9920                         clear_caches(self.trees)
9921                         if not self._mergelist:
9922                                 break
9923
9924                         self._save_resume_list()
9925                         self._pkg_count.curval = 0
9926                         self._pkg_count.maxval = len([x for x in self._mergelist \
9927                                 if isinstance(x, Package) and x.operation == "merge"])
9928                         self._status_display.maxval = self._pkg_count.maxval
9929
9930                 self._logger.log(" *** Finished. Cleaning up...")
9931
9932                 if failed_pkgs:
9933                         self._failed_pkgs_all.extend(failed_pkgs)
9934                         del failed_pkgs[:]
9935
9936                 background = self._background
9937                 failure_log_shown = False
9938                 if background and len(self._failed_pkgs_all) == 1:
9939                         # If only one package failed then just show it's
9940                         # whole log for easy viewing.
9941                         failed_pkg = self._failed_pkgs_all[-1]
9942                         build_dir = failed_pkg.build_dir
9943                         log_file = None
9944
9945                         log_paths = [failed_pkg.build_log]
9946
9947                         log_path = self._locate_failure_log(failed_pkg)
9948                         if log_path is not None:
9949                                 try:
9950                                         log_file = open(log_path, 'rb')
9951                                 except IOError:
9952                                         pass
9953
9954                         if log_file is not None:
9955                                 try:
9956                                         for line in log_file:
9957                                                 writemsg_level(line, noiselevel=-1)
9958                                 finally:
9959                                         log_file.close()
9960                                 failure_log_shown = True
9961
9962                 # Dump mod_echo output now since it tends to flood the terminal.
9963                 # This allows us to avoid having more important output, generated
9964                 # later, from being swept away by the mod_echo output.
9965                 mod_echo_output =  _flush_elog_mod_echo()
9966
9967                 if background and not failure_log_shown and \
9968                         self._failed_pkgs_all and \
9969                         self._failed_pkgs_die_msgs and \
9970                         not mod_echo_output:
9971
9972                         printer = portage.output.EOutput()
9973                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
9974                                 root_msg = ""
9975                                 if mysettings["ROOT"] != "/":
9976                                         root_msg = " merged to %s" % mysettings["ROOT"]
9977                                 print
9978                                 printer.einfo("Error messages for package %s%s:" % \
9979                                         (colorize("INFORM", key), root_msg))
9980                                 print
9981                                 for phase in portage.const.EBUILD_PHASES:
9982                                         if phase not in logentries:
9983                                                 continue
9984                                         for msgtype, msgcontent in logentries[phase]:
9985                                                 if isinstance(msgcontent, basestring):
9986                                                         msgcontent = [msgcontent]
9987                                                 for line in msgcontent:
9988                                                         printer.eerror(line.strip("\n"))
9989
9990                 if self._post_mod_echo_msgs:
9991                         for msg in self._post_mod_echo_msgs:
9992                                 msg()
9993
9994                 if len(self._failed_pkgs_all) > 1:
9995                         msg = "The following packages have " + \
9996                                 "failed to build or install:"
9997                         prefix = bad(" * ")
9998                         writemsg(prefix + "\n", noiselevel=-1)
9999                         from textwrap import wrap
10000                         for line in wrap(msg, 72):
10001                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10002                         writemsg(prefix + "\n", noiselevel=-1)
10003                         for failed_pkg in self._failed_pkgs_all:
10004                                 writemsg("%s\t%s\n" % (prefix,
10005                                         colorize("INFORM", str(failed_pkg.pkg))),
10006                                         noiselevel=-1)
10007                         writemsg(prefix + "\n", noiselevel=-1)
10008
10009                 return rval
10010
10011         def _elog_listener(self, mysettings, key, logentries, fulltext):
10012                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10013                 if errors:
10014                         self._failed_pkgs_die_msgs.append(
10015                                 (mysettings, key, errors))
10016
10017         def _locate_failure_log(self, failed_pkg):
10018
10019                 build_dir = failed_pkg.build_dir
10020                 log_file = None
10021
10022                 log_paths = [failed_pkg.build_log]
10023
10024                 for log_path in log_paths:
10025                         if not log_path:
10026                                 continue
10027
10028                         try:
10029                                 log_size = os.stat(log_path).st_size
10030                         except OSError:
10031                                 continue
10032
10033                         if log_size == 0:
10034                                 continue
10035
10036                         return log_path
10037
10038                 return None
10039
10040         def _add_packages(self):
10041                 pkg_queue = self._pkg_queue
10042                 for pkg in self._mergelist:
10043                         if isinstance(pkg, Package):
10044                                 pkg_queue.append(pkg)
10045                         elif isinstance(pkg, Blocker):
10046                                 pass
10047
10048         def _merge_exit(self, merge):
10049                 self._do_merge_exit(merge)
10050                 self._deallocate_config(merge.merge.settings)
10051                 if merge.returncode == os.EX_OK and \
10052                         not merge.merge.pkg.installed:
10053                         self._status_display.curval += 1
10054                 self._status_display.merges = len(self._task_queues.merge)
10055                 self._schedule()
10056
10057         def _do_merge_exit(self, merge):
10058                 pkg = merge.merge.pkg
10059                 if merge.returncode != os.EX_OK:
10060                         settings = merge.merge.settings
10061                         build_dir = settings.get("PORTAGE_BUILDDIR")
10062                         build_log = settings.get("PORTAGE_LOG_FILE")
10063
10064                         self._failed_pkgs.append(self._failed_pkg(
10065                                 build_dir=build_dir, build_log=build_log,
10066                                 pkg=pkg,
10067                                 returncode=merge.returncode))
10068                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10069
10070                         self._status_display.failed = len(self._failed_pkgs)
10071                         return
10072
10073                 self._task_complete(pkg)
10074                 pkg_to_replace = merge.merge.pkg_to_replace
10075                 if pkg_to_replace is not None:
10076                         # When a package is replaced, mark it's uninstall
10077                         # task complete (if any).
10078                         uninst_hash_key = \
10079                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10080                         self._task_complete(uninst_hash_key)
10081
10082                 if pkg.installed:
10083                         return
10084
10085                 self._restart_if_necessary(pkg)
10086
10087                 # Call mtimedb.commit() after each merge so that
10088                 # --resume still works after being interrupted
10089                 # by reboot, sigkill or similar.
10090                 mtimedb = self._mtimedb
10091                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10092                 if not mtimedb["resume"]["mergelist"]:
10093                         del mtimedb["resume"]
10094                 mtimedb.commit()
10095
10096         def _build_exit(self, build):
10097                 if build.returncode == os.EX_OK:
10098                         self.curval += 1
10099                         merge = PackageMerge(merge=build)
10100                         merge.addExitListener(self._merge_exit)
10101                         self._task_queues.merge.add(merge)
10102                         self._status_display.merges = len(self._task_queues.merge)
10103                 else:
10104                         settings = build.settings
10105                         build_dir = settings.get("PORTAGE_BUILDDIR")
10106                         build_log = settings.get("PORTAGE_LOG_FILE")
10107
10108                         self._failed_pkgs.append(self._failed_pkg(
10109                                 build_dir=build_dir, build_log=build_log,
10110                                 pkg=build.pkg,
10111                                 returncode=build.returncode))
10112                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10113
10114                         self._status_display.failed = len(self._failed_pkgs)
10115                         self._deallocate_config(build.settings)
10116                 self._jobs -= 1
10117                 self._status_display.running = self._jobs
10118                 self._schedule()
10119
10120         def _extract_exit(self, build):
10121                 self._build_exit(build)
10122
10123         def _task_complete(self, pkg):
10124                 self._completed_tasks.add(pkg)
10125                 self._choose_pkg_return_early = False
10126
10127         def _merge(self):
10128
10129                 self._add_prefetchers()
10130                 self._add_packages()
10131                 pkg_queue = self._pkg_queue
10132                 failed_pkgs = self._failed_pkgs
10133                 portage.locks._quiet = self._background
10134                 portage.elog._emerge_elog_listener = self._elog_listener
10135                 rval = os.EX_OK
10136
10137                 try:
10138                         self._main_loop()
10139                 finally:
10140                         self._main_loop_cleanup()
10141                         portage.locks._quiet = False
10142                         portage.elog._emerge_elog_listener = None
10143                         if failed_pkgs:
10144                                 rval = failed_pkgs[-1].returncode
10145
10146                 return rval
10147
10148         def _main_loop_cleanup(self):
10149                 del self._pkg_queue[:]
10150                 self._completed_tasks.clear()
10151                 self._choose_pkg_return_early = False
10152                 self._status_display.reset()
10153                 self._digraph = None
10154                 self._task_queues.fetch.clear()
10155
10156         def _choose_pkg(self):
10157                 """
10158                 Choose a task that has all it's dependencies satisfied.
10159                 """
10160
10161                 if self._choose_pkg_return_early:
10162                         return None
10163
10164                 if self._digraph is None:
10165                         if (self._jobs or self._task_queues.merge) and \
10166                                 not ("--nodeps" in self.myopts and \
10167                                 (self._max_jobs is True or self._max_jobs > 1)):
10168                                 self._choose_pkg_return_early = True
10169                                 return None
10170                         return self._pkg_queue.pop(0)
10171
10172                 if not (self._jobs or self._task_queues.merge):
10173                         return self._pkg_queue.pop(0)
10174
10175                 self._prune_digraph()
10176
10177                 chosen_pkg = None
10178                 later = set(self._pkg_queue)
10179                 for pkg in self._pkg_queue:
10180                         later.remove(pkg)
10181                         if not self._dependent_on_scheduled_merges(pkg, later):
10182                                 chosen_pkg = pkg
10183                                 break
10184
10185                 if chosen_pkg is not None:
10186                         self._pkg_queue.remove(chosen_pkg)
10187
10188                 if chosen_pkg is None:
10189                         # There's no point in searching for a package to
10190                         # choose until at least one of the existing jobs
10191                         # completes.
10192                         self._choose_pkg_return_early = True
10193
10194                 return chosen_pkg
10195
10196         def _dependent_on_scheduled_merges(self, pkg, later):
10197                 """
10198                 Traverse the subgraph of the given packages deep dependencies
10199                 to see if it contains any scheduled merges.
10200                 @param pkg: a package to check dependencies for
10201                 @type pkg: Package
10202                 @param later: packages for which dependence should be ignored
10203                         since they will be merged later than pkg anyway and therefore
10204                         delaying the merge of pkg will not result in a more optimal
10205                         merge order
10206                 @type later: set
10207                 @rtype: bool
10208                 @returns: True if the package is dependent, False otherwise.
10209                 """
10210
10211                 graph = self._digraph
10212                 completed_tasks = self._completed_tasks
10213
10214                 dependent = False
10215                 traversed_nodes = set([pkg])
10216                 direct_deps = graph.child_nodes(pkg)
10217                 node_stack = direct_deps
10218                 direct_deps = frozenset(direct_deps)
10219                 while node_stack:
10220                         node = node_stack.pop()
10221                         if node in traversed_nodes:
10222                                 continue
10223                         traversed_nodes.add(node)
10224                         if not ((node.installed and node.operation == "nomerge") or \
10225                                 (node.operation == "uninstall" and \
10226                                 node not in direct_deps) or \
10227                                 node in completed_tasks or \
10228                                 node in later):
10229                                 dependent = True
10230                                 break
10231                         node_stack.extend(graph.child_nodes(node))
10232
10233                 return dependent
10234
10235         def _allocate_config(self, root):
10236                 """
10237                 Allocate a unique config instance for a task in order
10238                 to prevent interference between parallel tasks.
10239                 """
10240                 if self._config_pool[root]:
10241                         temp_settings = self._config_pool[root].pop()
10242                 else:
10243                         temp_settings = portage.config(clone=self.pkgsettings[root])
10244                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10245                 # performance reasons, call it here to make sure all settings from the
10246                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10247                 temp_settings.reload()
10248                 temp_settings.reset()
10249                 return temp_settings
10250
10251         def _deallocate_config(self, settings):
10252                 self._config_pool[settings["ROOT"]].append(settings)
10253
10254         def _main_loop(self):
10255
10256                 # Only allow 1 job max if a restart is scheduled
10257                 # due to portage update.
10258                 if self._is_restart_scheduled() or \
10259                         self._opts_no_background.intersection(self.myopts):
10260                         self._set_max_jobs(1)
10261
10262                 merge_queue = self._task_queues.merge
10263
10264                 while self._schedule():
10265                         if self._poll_event_handlers:
10266                                 self._poll_loop()
10267
10268                 while True:
10269                         self._schedule()
10270                         if not (self._jobs or merge_queue):
10271                                 break
10272                         if self._poll_event_handlers:
10273                                 self._poll_loop()
10274
10275         def _keep_scheduling(self):
10276                 return bool(self._pkg_queue and \
10277                         not (self._failed_pkgs and not self._build_opts.fetchonly))
10278
10279         def _schedule_tasks(self):
10280                 self._schedule_tasks_imp()
10281                 self._status_display.display()
10282
10283                 state_change = 0
10284                 for q in self._task_queues.values():
10285                         if q.schedule():
10286                                 state_change += 1
10287
10288                 # Cancel prefetchers if they're the only reason
10289                 # the main poll loop is still running.
10290                 if self._failed_pkgs and not self._build_opts.fetchonly and \
10291                         not (self._jobs or self._task_queues.merge) and \
10292                         self._task_queues.fetch:
10293                         self._task_queues.fetch.clear()
10294                         state_change += 1
10295
10296                 if state_change:
10297                         self._schedule_tasks_imp()
10298                         self._status_display.display()
10299
10300                 return self._keep_scheduling()
10301
10302         def _job_delay(self):
10303                 """
10304                 @rtype: bool
10305                 @returns: True if job scheduling should be delayed, False otherwise.
10306                 """
10307
10308                 if self._jobs and self._max_load is not None:
10309
10310                         current_time = time.time()
10311
10312                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10313                         if delay > self._job_delay_max:
10314                                 delay = self._job_delay_max
10315                         if (current_time - self._previous_job_start_time) < delay:
10316                                 return True
10317
10318                 return False
10319
10320         def _schedule_tasks_imp(self):
10321                 """
10322                 @rtype: bool
10323                 @returns: True if state changed, False otherwise.
10324                 """
10325
10326                 state_change = 0
10327
10328                 while True:
10329
10330                         if not self._keep_scheduling():
10331                                 return bool(state_change)
10332
10333                         if self._choose_pkg_return_early or \
10334                                 not self._can_add_job() or \
10335                                 self._job_delay():
10336                                 return bool(state_change)
10337
10338                         pkg = self._choose_pkg()
10339                         if pkg is None:
10340                                 return bool(state_change)
10341
10342                         state_change += 1
10343
10344                         if not pkg.installed:
10345                                 self._pkg_count.curval += 1
10346
10347                         task = self._task(pkg)
10348
10349                         if pkg.installed:
10350                                 merge = PackageMerge(merge=task)
10351                                 merge.addExitListener(self._merge_exit)
10352                                 self._task_queues.merge.add(merge)
10353
10354                         elif pkg.built:
10355                                 self._jobs += 1
10356                                 self._previous_job_start_time = time.time()
10357                                 self._status_display.running = self._jobs
10358                                 task.addExitListener(self._extract_exit)
10359                                 self._task_queues.jobs.add(task)
10360
10361                         else:
10362                                 self._jobs += 1
10363                                 self._previous_job_start_time = time.time()
10364                                 self._status_display.running = self._jobs
10365                                 task.addExitListener(self._build_exit)
10366                                 self._task_queues.jobs.add(task)
10367
10368                 return bool(state_change)
10369
10370         def _task(self, pkg):
10371
10372                 pkg_to_replace = None
10373                 if pkg.operation != "uninstall":
10374                         vardb = pkg.root_config.trees["vartree"].dbapi
10375                         previous_cpv = vardb.match(pkg.slot_atom)
10376                         if previous_cpv:
10377                                 previous_cpv = previous_cpv.pop()
10378                                 pkg_to_replace = self._pkg(previous_cpv,
10379                                         "installed", pkg.root_config, installed=True)
10380
10381                 task = MergeListItem(args_set=self._args_set,
10382                         background=self._background, binpkg_opts=self._binpkg_opts,
10383                         build_opts=self._build_opts,
10384                         config_pool=self._ConfigPool(pkg.root,
10385                         self._allocate_config, self._deallocate_config),
10386                         emerge_opts=self.myopts,
10387                         find_blockers=self._find_blockers(pkg), logger=self._logger,
10388                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10389                         pkg_to_replace=pkg_to_replace,
10390                         prefetcher=self._prefetchers.get(pkg),
10391                         scheduler=self._sched_iface,
10392                         settings=self._allocate_config(pkg.root),
10393                         statusMessage=self._status_msg,
10394                         world_atom=self._world_atom)
10395
10396                 return task
10397
10398         def _failed_pkg_msg(self, failed_pkg, action, preposition):
10399                 pkg = failed_pkg.pkg
10400                 msg = "%s to %s %s" % \
10401                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10402                 if pkg.root != "/":
10403                         msg += " %s %s" % (preposition, pkg.root)
10404
10405                 log_path = self._locate_failure_log(failed_pkg)
10406                 if log_path is not None:
10407                         msg += ", Log file:"
10408                 self._status_msg(msg)
10409
10410                 if log_path is not None:
10411                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10412
10413         def _status_msg(self, msg):
10414                 """
10415                 Display a brief status message (no newlines) in the status display.
10416                 This is called by tasks to provide feedback to the user. This
10417                 delegates the resposibility of generating \r and \n control characters,
10418                 to guarantee that lines are created or erased when necessary and
10419                 appropriate.
10420
10421                 @type msg: str
10422                 @param msg: a brief status message (no newlines allowed)
10423                 """
10424                 if not self._background:
10425                         writemsg_level("\n")
10426                 self._status_display.displayMessage(msg)
10427
10428         def _save_resume_list(self):
10429                 """
10430                 Do this before verifying the ebuild Manifests since it might
10431                 be possible for the user to use --resume --skipfirst get past
10432                 a non-essential package with a broken digest.
10433                 """
10434                 mtimedb = self._mtimedb
10435                 mtimedb["resume"]["mergelist"] = [list(x) \
10436                         for x in self._mergelist \
10437                         if isinstance(x, Package) and x.operation == "merge"]
10438
10439                 mtimedb.commit()
10440
10441         def _calc_resume_list(self):
10442                 """
10443                 Use the current resume list to calculate a new one,
10444                 dropping any packages with unsatisfied deps.
10445                 @rtype: bool
10446                 @returns: True if successful, False otherwise.
10447                 """
10448                 print colorize("GOOD", "*** Resuming merge...")
10449
10450                 if self._show_list():
10451                         if "--tree" in self.myopts:
10452                                 portage.writemsg_stdout("\n" + \
10453                                         darkgreen("These are the packages that " + \
10454                                         "would be merged, in reverse order:\n\n"))
10455
10456                         else:
10457                                 portage.writemsg_stdout("\n" + \
10458                                         darkgreen("These are the packages that " + \
10459                                         "would be merged, in order:\n\n"))
10460
10461                 show_spinner = "--quiet" not in self.myopts and \
10462                         "--nodeps" not in self.myopts
10463
10464                 if show_spinner:
10465                         print "Calculating dependencies  ",
10466
10467                 myparams = create_depgraph_params(self.myopts, None)
10468                 success = False
10469                 e = None
10470                 try:
10471                         success, mydepgraph, dropped_tasks = resume_depgraph(
10472                                 self.settings, self.trees, self._mtimedb, self.myopts,
10473                                 myparams, self._spinner, skip_unsatisfied=True)
10474                 except depgraph.UnsatisfiedResumeDep, e:
10475                         mydepgraph = e.depgraph
10476                         dropped_tasks = set()
10477
10478                 if show_spinner:
10479                         print "\b\b... done!"
10480
10481                 if e is not None:
10482                         def unsatisfied_resume_dep_msg():
10483                                 mydepgraph.display_problems()
10484                                 out = portage.output.EOutput()
10485                                 out.eerror("One or more packages are either masked or " + \
10486                                         "have missing dependencies:")
10487                                 out.eerror("")
10488                                 indent = "  "
10489                                 show_parents = set()
10490                                 for dep in e.value:
10491                                         if dep.parent in show_parents:
10492                                                 continue
10493                                         show_parents.add(dep.parent)
10494                                         if dep.atom is None:
10495                                                 out.eerror(indent + "Masked package:")
10496                                                 out.eerror(2 * indent + str(dep.parent))
10497                                                 out.eerror("")
10498                                         else:
10499                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
10500                                                 out.eerror(2 * indent + str(dep.parent))
10501                                                 out.eerror("")
10502                                 msg = "The resume list contains packages " + \
10503                                         "that are either masked or have " + \
10504                                         "unsatisfied dependencies. " + \
10505                                         "Please restart/continue " + \
10506                                         "the operation manually, or use --skipfirst " + \
10507                                         "to skip the first package in the list and " + \
10508                                         "any other packages that may be " + \
10509                                         "masked or have missing dependencies."
10510                                 for line in textwrap.wrap(msg, 72):
10511                                         out.eerror(line)
10512                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10513                         return False
10514
10515                 if success and self._show_list():
10516                         mylist = mydepgraph.altlist()
10517                         if mylist:
10518                                 if "--tree" in self.myopts:
10519                                         mylist.reverse()
10520                                 mydepgraph.display(mylist, favorites=self._favorites)
10521
10522                 if not success:
10523                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10524                         return False
10525                 mydepgraph.display_problems()
10526
10527                 mylist = mydepgraph.altlist()
10528                 mydepgraph.break_refs(mylist)
10529                 mydepgraph.break_refs(dropped_tasks)
10530                 self._mergelist = mylist
10531                 self._set_digraph(mydepgraph.schedulerGraph())
10532
10533                 msg_width = 75
10534                 for task in dropped_tasks:
10535                         if not (isinstance(task, Package) and task.operation == "merge"):
10536                                 continue
10537                         pkg = task
10538                         msg = "emerge --keep-going:" + \
10539                                 " %s" % (pkg.cpv,)
10540                         if pkg.root != "/":
10541                                 msg += " for %s" % (pkg.root,)
10542                         msg += " dropped due to unsatisfied dependency."
10543                         for line in textwrap.wrap(msg, msg_width):
10544                                 eerror(line, phase="other", key=pkg.cpv)
10545                         settings = self.pkgsettings[pkg.root]
10546                         # Ensure that log collection from $T is disabled inside
10547                         # elog_process(), since any logs that might exist are
10548                         # not valid here.
10549                         settings.pop("T", None)
10550                         portage.elog.elog_process(pkg.cpv, settings)
10551                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10552
10553                 return True
10554
10555         def _show_list(self):
10556                 myopts = self.myopts
10557                 if "--quiet" not in myopts and \
10558                         ("--ask" in myopts or "--tree" in myopts or \
10559                         "--verbose" in myopts):
10560                         return True
10561                 return False
10562
10563         def _world_atom(self, pkg):
10564                 """
10565                 Add the package to the world file, but only if
10566                 it's supposed to be added. Otherwise, do nothing.
10567                 """
10568
10569                 if set(("--buildpkgonly", "--fetchonly",
10570                         "--fetch-all-uri",
10571                         "--oneshot", "--onlydeps",
10572                         "--pretend")).intersection(self.myopts):
10573                         return
10574
10575                 if pkg.root != self.target_root:
10576                         return
10577
10578                 args_set = self._args_set
10579                 if not args_set.findAtomForPackage(pkg):
10580                         return
10581
10582                 logger = self._logger
10583                 pkg_count = self._pkg_count
10584                 root_config = pkg.root_config
10585                 world_set = root_config.sets["world"]
10586                 world_locked = False
10587                 if hasattr(world_set, "lock"):
10588                         world_set.lock()
10589                         world_locked = True
10590
10591                 try:
10592                         if hasattr(world_set, "load"):
10593                                 world_set.load() # maybe it's changed on disk
10594
10595                         atom = create_world_atom(pkg, args_set, root_config)
10596                         if atom:
10597                                 if hasattr(world_set, "add"):
10598                                         self._status_msg(('Recording %s in "world" ' + \
10599                                                 'favorites file...') % atom)
10600                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
10601                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10602                                         world_set.add(atom)
10603                                 else:
10604                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10605                                                 (atom,), level=logging.WARN, noiselevel=-1)
10606                 finally:
10607                         if world_locked:
10608                                 world_set.unlock()
10609
10610         def _pkg(self, cpv, type_name, root_config, installed=False):
10611                 """
10612                 Get a package instance from the cache, or create a new
10613                 one if necessary. Raises KeyError from aux_get if it
10614                 failures for some reason (package does not exist or is
10615                 corrupt).
10616                 """
10617                 operation = "merge"
10618                 if installed:
10619                         operation = "nomerge"
10620
10621                 if self._digraph is not None:
10622                         # Reuse existing instance when available.
10623                         pkg = self._digraph.get(
10624                                 (type_name, root_config.root, cpv, operation))
10625                         if pkg is not None:
10626                                 return pkg
10627
10628                 tree_type = depgraph.pkg_tree_map[type_name]
10629                 db = root_config.trees[tree_type].dbapi
10630                 db_keys = list(self.trees[root_config.root][
10631                         tree_type].dbapi._aux_cache_keys)
10632                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10633                 pkg = Package(cpv=cpv, metadata=metadata,
10634                         root_config=root_config, installed=installed)
10635                 if type_name == "ebuild":
10636                         settings = self.pkgsettings[root_config.root]
10637                         settings.setcpv(pkg)
10638                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
10639
10640                 return pkg
10641
10642 class MetadataRegen(PollScheduler):
10643
10644         def __init__(self, portdb, max_jobs=None, max_load=None):
10645                 PollScheduler.__init__(self)
10646                 self._portdb = portdb
10647
10648                 if max_jobs is None:
10649                         max_jobs = 1
10650
10651                 self._max_jobs = max_jobs
10652                 self._max_load = max_load
10653                 self._sched_iface = self._sched_iface_class(
10654                         register=self._register,
10655                         schedule=self._schedule_wait,
10656                         unregister=self._unregister)
10657
10658                 self._valid_pkgs = set()
10659                 self._process_iter = self._iter_metadata_processes()
10660
10661         def _iter_metadata_processes(self):
10662                 portdb = self._portdb
10663                 valid_pkgs = self._valid_pkgs
10664                 every_cp = portdb.cp_all()
10665                 every_cp.sort(reverse=True)
10666
10667                 while every_cp:
10668                         cp = every_cp.pop()
10669                         portage.writemsg_stdout("Processing %s\n" % cp)
10670                         cpv_list = portdb.cp_list(cp)
10671                         for cpv in cpv_list:
10672                                 valid_pkgs.add(cpv)
10673                                 ebuild_path, repo_path = portdb.findname2(cpv)
10674                                 metadata_process = portdb._metadata_process(
10675                                         cpv, ebuild_path, repo_path)
10676                                 if metadata_process is None:
10677                                         continue
10678                                 yield metadata_process
10679
10680         def run(self):
10681
10682                 portdb = self._portdb
10683                 from portage.cache.cache_errors import CacheError
10684                 dead_nodes = {}
10685
10686                 for mytree in portdb.porttrees:
10687                         try:
10688                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10689                         except CacheError, e:
10690                                 portage.writemsg("Error listing cache entries for " + \
10691                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10692                                 del e
10693                                 dead_nodes = None
10694                                 break
10695
10696                 while self._schedule():
10697                         self._poll_loop()
10698
10699                 while self._jobs:
10700                         self._poll_loop()
10701
10702                 if dead_nodes:
10703                         for y in self._valid_pkgs:
10704                                 for mytree in portdb.porttrees:
10705                                         if portdb.findname2(y, mytree=mytree)[0]:
10706                                                 dead_nodes[mytree].discard(y)
10707
10708                         for mytree, nodes in dead_nodes.iteritems():
10709                                 auxdb = portdb.auxdb[mytree]
10710                                 for y in nodes:
10711                                         try:
10712                                                 del auxdb[y]
10713                                         except (KeyError, CacheError):
10714                                                 pass
10715
10716         def _schedule_tasks(self):
10717                 """
10718                 @rtype: bool
10719                 @returns: True if there may be remaining tasks to schedule,
10720                         False otherwise.
10721                 """
10722                 while self._can_add_job():
10723                         try:
10724                                 metadata_process = self._process_iter.next()
10725                         except StopIteration:
10726                                 return False
10727
10728                         self._jobs += 1
10729                         metadata_process.scheduler = self._sched_iface
10730                         metadata_process.addExitListener(self._metadata_exit)
10731                         metadata_process.start()
10732                 return True
10733
10734         def _metadata_exit(self, metadata_process):
10735                 self._jobs -= 1
10736                 if metadata_process.returncode != os.EX_OK:
10737                         self._valid_pkgs.discard(metadata_process.cpv)
10738                         portage.writemsg("Error processing %s, continuing...\n" % \
10739                                 (metadata_process.cpv,))
10740                 self._schedule()
10741
10742 class UninstallFailure(portage.exception.PortageException):
10743         """
10744         An instance of this class is raised by unmerge() when
10745         an uninstallation fails.
10746         """
10747         status = 1
10748         def __init__(self, *pargs):
10749                 portage.exception.PortageException.__init__(self, pargs)
10750                 if pargs:
10751                         self.status = pargs[0]
10752
10753 def unmerge(root_config, myopts, unmerge_action,
10754         unmerge_files, ldpath_mtimes, autoclean=0,
10755         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10756         scheduler=None, writemsg_level=portage.util.writemsg_level):
10757
10758         quiet = "--quiet" in myopts
10759         settings = root_config.settings
10760         sets = root_config.sets
10761         vartree = root_config.trees["vartree"]
10762         candidate_catpkgs=[]
10763         global_unmerge=0
10764         xterm_titles = "notitles" not in settings.features
10765         out = portage.output.EOutput()
10766         pkg_cache = {}
10767         db_keys = list(vartree.dbapi._aux_cache_keys)
10768
10769         def _pkg(cpv):
10770                 pkg = pkg_cache.get(cpv)
10771                 if pkg is None:
10772                         pkg = Package(cpv=cpv, installed=True,
10773                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10774                                 root_config=root_config,
10775                                 type_name="installed")
10776                         pkg_cache[cpv] = pkg
10777                 return pkg
10778
10779         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10780         try:
10781                 # At least the parent needs to exist for the lock file.
10782                 portage.util.ensure_dirs(vdb_path)
10783         except portage.exception.PortageException:
10784                 pass
10785         vdb_lock = None
10786         try:
10787                 if os.access(vdb_path, os.W_OK):
10788                         vdb_lock = portage.locks.lockdir(vdb_path)
10789                 realsyslist = sets["system"].getAtoms()
10790                 syslist = []
10791                 for x in realsyslist:
10792                         mycp = portage.dep_getkey(x)
10793                         if mycp in settings.getvirtuals():
10794                                 providers = []
10795                                 for provider in settings.getvirtuals()[mycp]:
10796                                         if vartree.dbapi.match(provider):
10797                                                 providers.append(provider)
10798                                 if len(providers) == 1:
10799                                         syslist.extend(providers)
10800                         else:
10801                                 syslist.append(mycp)
10802         
10803                 mysettings = portage.config(clone=settings)
10804         
10805                 if not unmerge_files:
10806                         if unmerge_action == "unmerge":
10807                                 print
10808                                 print bold("emerge unmerge") + " can only be used with specific package names"
10809                                 print
10810                                 return 0
10811                         else:
10812                                 global_unmerge = 1
10813         
10814                 localtree = vartree
10815                 # process all arguments and add all
10816                 # valid db entries to candidate_catpkgs
10817                 if global_unmerge:
10818                         if not unmerge_files:
10819                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10820                 else:
10821                         #we've got command-line arguments
10822                         if not unmerge_files:
10823                                 print "\nNo packages to unmerge have been provided.\n"
10824                                 return 0
10825                         for x in unmerge_files:
10826                                 arg_parts = x.split('/')
10827                                 if x[0] not in [".","/"] and \
10828                                         arg_parts[-1][-7:] != ".ebuild":
10829                                         #possible cat/pkg or dep; treat as such
10830                                         candidate_catpkgs.append(x)
10831                                 elif unmerge_action in ["prune","clean"]:
10832                                         print "\n!!! Prune and clean do not accept individual" + \
10833                                                 " ebuilds as arguments;\n    skipping.\n"
10834                                         continue
10835                                 else:
10836                                         # it appears that the user is specifying an installed
10837                                         # ebuild and we're in "unmerge" mode, so it's ok.
10838                                         if not os.path.exists(x):
10839                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
10840                                                 return 0
10841         
10842                                         absx   = os.path.abspath(x)
10843                                         sp_absx = absx.split("/")
10844                                         if sp_absx[-1][-7:] == ".ebuild":
10845                                                 del sp_absx[-1]
10846                                                 absx = "/".join(sp_absx)
10847         
10848                                         sp_absx_len = len(sp_absx)
10849         
10850                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10851                                         vdb_len  = len(vdb_path)
10852         
10853                                         sp_vdb     = vdb_path.split("/")
10854                                         sp_vdb_len = len(sp_vdb)
10855         
10856                                         if not os.path.exists(absx+"/CONTENTS"):
10857                                                 print "!!! Not a valid db dir: "+str(absx)
10858                                                 return 0
10859         
10860                                         if sp_absx_len <= sp_vdb_len:
10861                                                 # The Path is shorter... so it can't be inside the vdb.
10862                                                 print sp_absx
10863                                                 print absx
10864                                                 print "\n!!!",x,"cannot be inside "+ \
10865                                                         vdb_path+"; aborting.\n"
10866                                                 return 0
10867         
10868                                         for idx in range(0,sp_vdb_len):
10869                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
10870                                                         print sp_absx
10871                                                         print absx
10872                                                         print "\n!!!", x, "is not inside "+\
10873                                                                 vdb_path+"; aborting.\n"
10874                                                         return 0
10875         
10876                                         print "="+"/".join(sp_absx[sp_vdb_len:])
10877                                         candidate_catpkgs.append(
10878                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
10879         
10880                 newline=""
10881                 if (not "--quiet" in myopts):
10882                         newline="\n"
10883                 if settings["ROOT"] != "/":
10884                         writemsg_level(darkgreen(newline+ \
10885                                 ">>> Using system located in ROOT tree %s\n" % \
10886                                 settings["ROOT"]))
10887
10888                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
10889                         not ("--quiet" in myopts):
10890                         writemsg_level(darkgreen(newline+\
10891                                 ">>> These are the packages that would be unmerged:\n"))
10892
10893                 # Preservation of order is required for --depclean and --prune so
10894                 # that dependencies are respected. Use all_selected to eliminate
10895                 # duplicate packages since the same package may be selected by
10896                 # multiple atoms.
10897                 pkgmap = []
10898                 all_selected = set()
10899                 for x in candidate_catpkgs:
10900                         # cycle through all our candidate deps and determine
10901                         # what will and will not get unmerged
10902                         try:
10903                                 mymatch = vartree.dbapi.match(x)
10904                         except portage.exception.AmbiguousPackageName, errpkgs:
10905                                 print "\n\n!!! The short ebuild name \"" + \
10906                                         x + "\" is ambiguous.  Please specify"
10907                                 print "!!! one of the following fully-qualified " + \
10908                                         "ebuild names instead:\n"
10909                                 for i in errpkgs[0]:
10910                                         print "    " + green(i)
10911                                 print
10912                                 sys.exit(1)
10913         
10914                         if not mymatch and x[0] not in "<>=~":
10915                                 mymatch = localtree.dep_match(x)
10916                         if not mymatch:
10917                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
10918                                         (x, unmerge_action), noiselevel=-1)
10919                                 continue
10920
10921                         pkgmap.append(
10922                                 {"protected": set(), "selected": set(), "omitted": set()})
10923                         mykey = len(pkgmap) - 1
10924                         if unmerge_action=="unmerge":
10925                                         for y in mymatch:
10926                                                 if y not in all_selected:
10927                                                         pkgmap[mykey]["selected"].add(y)
10928                                                         all_selected.add(y)
10929                         elif unmerge_action == "prune":
10930                                 if len(mymatch) == 1:
10931                                         continue
10932                                 best_version = mymatch[0]
10933                                 best_slot = vartree.getslot(best_version)
10934                                 best_counter = vartree.dbapi.cpv_counter(best_version)
10935                                 for mypkg in mymatch[1:]:
10936                                         myslot = vartree.getslot(mypkg)
10937                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
10938                                         if (myslot == best_slot and mycounter > best_counter) or \
10939                                                 mypkg == portage.best([mypkg, best_version]):
10940                                                 if myslot == best_slot:
10941                                                         if mycounter < best_counter:
10942                                                                 # On slot collision, keep the one with the
10943                                                                 # highest counter since it is the most
10944                                                                 # recently installed.
10945                                                                 continue
10946                                                 best_version = mypkg
10947                                                 best_slot = myslot
10948                                                 best_counter = mycounter
10949                                 pkgmap[mykey]["protected"].add(best_version)
10950                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
10951                                         if mypkg != best_version and mypkg not in all_selected)
10952                                 all_selected.update(pkgmap[mykey]["selected"])
10953                         else:
10954                                 # unmerge_action == "clean"
10955                                 slotmap={}
10956                                 for mypkg in mymatch:
10957                                         if unmerge_action == "clean":
10958                                                 myslot = localtree.getslot(mypkg)
10959                                         else:
10960                                                 # since we're pruning, we don't care about slots
10961                                                 # and put all the pkgs in together
10962                                                 myslot = 0
10963                                         if myslot not in slotmap:
10964                                                 slotmap[myslot] = {}
10965                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
10966                                 
10967                                 for myslot in slotmap:
10968                                         counterkeys = slotmap[myslot].keys()
10969                                         if not counterkeys:
10970                                                 continue
10971                                         counterkeys.sort()
10972                                         pkgmap[mykey]["protected"].add(
10973                                                 slotmap[myslot][counterkeys[-1]])
10974                                         del counterkeys[-1]
10975                                         #be pretty and get them in order of merge:
10976                                         for ckey in counterkeys:
10977                                                 mypkg = slotmap[myslot][ckey]
10978                                                 if mypkg not in all_selected:
10979                                                         pkgmap[mykey]["selected"].add(mypkg)
10980                                                         all_selected.add(mypkg)
10981                                         # ok, now the last-merged package
10982                                         # is protected, and the rest are selected
10983                 numselected = len(all_selected)
10984                 if global_unmerge and not numselected:
10985                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
10986                         return 0
10987         
10988                 if not numselected:
10989                         portage.writemsg_stdout(
10990                                 "\n>>> No packages selected for removal by " + \
10991                                 unmerge_action + "\n")
10992                         return 0
10993         finally:
10994                 if vdb_lock:
10995                         vartree.dbapi.flush_cache()
10996                         portage.locks.unlockdir(vdb_lock)
10997         
10998         from portage.sets.base import EditablePackageSet
10999         
11000         # generate a list of package sets that are directly or indirectly listed in "world",
11001         # as there is no persistent list of "installed" sets
11002         installed_sets = ["world"]
11003         stop = False
11004         pos = 0
11005         while not stop:
11006                 stop = True
11007                 pos = len(installed_sets)
11008                 for s in installed_sets[pos - 1:]:
11009                         if s not in sets:
11010                                 continue
11011                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11012                         if candidates:
11013                                 stop = False
11014                                 installed_sets += candidates
11015         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11016         del stop, pos
11017
11018         # we don't want to unmerge packages that are still listed in user-editable package sets
11019         # listed in "world" as they would be remerged on the next update of "world" or the 
11020         # relevant package sets.
11021         unknown_sets = set()
11022         for cp in xrange(len(pkgmap)):
11023                 for cpv in pkgmap[cp]["selected"].copy():
11024                         try:
11025                                 pkg = _pkg(cpv)
11026                         except KeyError:
11027                                 # It could have been uninstalled
11028                                 # by a concurrent process.
11029                                 continue
11030
11031                         if unmerge_action != "clean" and \
11032                                 root_config.root == "/" and \
11033                                 portage.match_from_list(
11034                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11035                                 msg = ("Not unmerging package %s since there is no valid " + \
11036                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11037                                 for line in textwrap.wrap(msg, 75):
11038                                         out.eerror(line)
11039                                 # adjust pkgmap so the display output is correct
11040                                 pkgmap[cp]["selected"].remove(cpv)
11041                                 all_selected.remove(cpv)
11042                                 pkgmap[cp]["protected"].add(cpv)
11043                                 continue
11044
11045                         parents = []
11046                         for s in installed_sets:
11047                                 # skip sets that the user requested to unmerge, and skip world 
11048                                 # unless we're unmerging a package set (as the package would be 
11049                                 # removed from "world" later on)
11050                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11051                                         continue
11052
11053                                 if s not in sets:
11054                                         if s in unknown_sets:
11055                                                 continue
11056                                         unknown_sets.add(s)
11057                                         out = portage.output.EOutput()
11058                                         out.eerror(("Unknown set '@%s' in " + \
11059                                                 "%svar/lib/portage/world_sets") % \
11060                                                 (s, root_config.root))
11061                                         continue
11062
11063                                 # only check instances of EditablePackageSet as other classes are generally used for
11064                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11065                                 # user can't do much about them anyway)
11066                                 if isinstance(sets[s], EditablePackageSet):
11067
11068                                         # This is derived from a snippet of code in the
11069                                         # depgraph._iter_atoms_for_pkg() method.
11070                                         for atom in sets[s].iterAtomsForPackage(pkg):
11071                                                 inst_matches = vartree.dbapi.match(atom)
11072                                                 inst_matches.reverse() # descending order
11073                                                 higher_slot = None
11074                                                 for inst_cpv in inst_matches:
11075                                                         try:
11076                                                                 inst_pkg = _pkg(inst_cpv)
11077                                                         except KeyError:
11078                                                                 # It could have been uninstalled
11079                                                                 # by a concurrent process.
11080                                                                 continue
11081
11082                                                         if inst_pkg.cp != atom.cp:
11083                                                                 continue
11084                                                         if pkg >= inst_pkg:
11085                                                                 # This is descending order, and we're not
11086                                                                 # interested in any versions <= pkg given.
11087                                                                 break
11088                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11089                                                                 higher_slot = inst_pkg
11090                                                                 break
11091                                                 if higher_slot is None:
11092                                                         parents.append(s)
11093                                                         break
11094                         if parents:
11095                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11096                                 #print colorize("WARN", "but still listed in the following package sets:")
11097                                 #print "    %s\n" % ", ".join(parents)
11098                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11099                                 print colorize("WARN", "still referenced by the following package sets:")
11100                                 print "    %s\n" % ", ".join(parents)
11101                                 # adjust pkgmap so the display output is correct
11102                                 pkgmap[cp]["selected"].remove(cpv)
11103                                 all_selected.remove(cpv)
11104                                 pkgmap[cp]["protected"].add(cpv)
11105         
11106         del installed_sets
11107
11108         numselected = len(all_selected)
11109         if not numselected:
11110                 writemsg_level(
11111                         "\n>>> No packages selected for removal by " + \
11112                         unmerge_action + "\n")
11113                 return 0
11114
11115         # Unmerge order only matters in some cases
11116         if not ordered:
11117                 unordered = {}
11118                 for d in pkgmap:
11119                         selected = d["selected"]
11120                         if not selected:
11121                                 continue
11122                         cp = portage.cpv_getkey(iter(selected).next())
11123                         cp_dict = unordered.get(cp)
11124                         if cp_dict is None:
11125                                 cp_dict = {}
11126                                 unordered[cp] = cp_dict
11127                                 for k in d:
11128                                         cp_dict[k] = set()
11129                         for k, v in d.iteritems():
11130                                 cp_dict[k].update(v)
11131                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11132
11133         for x in xrange(len(pkgmap)):
11134                 selected = pkgmap[x]["selected"]
11135                 if not selected:
11136                         continue
11137                 for mytype, mylist in pkgmap[x].iteritems():
11138                         if mytype == "selected":
11139                                 continue
11140                         mylist.difference_update(all_selected)
11141                 cp = portage.cpv_getkey(iter(selected).next())
11142                 for y in localtree.dep_match(cp):
11143                         if y not in pkgmap[x]["omitted"] and \
11144                                 y not in pkgmap[x]["selected"] and \
11145                                 y not in pkgmap[x]["protected"] and \
11146                                 y not in all_selected:
11147                                 pkgmap[x]["omitted"].add(y)
11148                 if global_unmerge and not pkgmap[x]["selected"]:
11149                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11150                         continue
11151                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11152                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11153                                 "'%s' is part of your system profile.\n" % cp),
11154                                 level=logging.WARNING, noiselevel=-1)
11155                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11156                                 "be damaging to your system.\n\n"),
11157                                 level=logging.WARNING, noiselevel=-1)
11158                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11159                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11160                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11161                 if not quiet:
11162                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11163                 else:
11164                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11165                 for mytype in ["selected","protected","omitted"]:
11166                         if not quiet:
11167                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11168                         if pkgmap[x][mytype]:
11169                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11170                                 sorted_pkgs.sort(portage.pkgcmp)
11171                                 for pn, ver, rev in sorted_pkgs:
11172                                         if rev == "r0":
11173                                                 myversion = ver
11174                                         else:
11175                                                 myversion = ver + "-" + rev
11176                                         if mytype == "selected":
11177                                                 writemsg_level(
11178                                                         colorize("UNMERGE_WARN", myversion + " "),
11179                                                         noiselevel=-1)
11180                                         else:
11181                                                 writemsg_level(
11182                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
11183                         else:
11184                                 writemsg_level("none ", noiselevel=-1)
11185                         if not quiet:
11186                                 writemsg_level("\n", noiselevel=-1)
11187                 if quiet:
11188                         writemsg_level("\n", noiselevel=-1)
11189
11190         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11191                 " packages are slated for removal.\n")
11192         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11193                         " and " + colorize("GOOD", "'omitted'") + \
11194                         " packages will not be removed.\n\n")
11195
11196         if "--pretend" in myopts:
11197                 #we're done... return
11198                 return 0
11199         if "--ask" in myopts:
11200                 if userquery("Would you like to unmerge these packages?")=="No":
11201                         # enter pretend mode for correct formatting of results
11202                         myopts["--pretend"] = True
11203                         print
11204                         print "Quitting."
11205                         print
11206                         return 0
11207         #the real unmerging begins, after a short delay....
11208         if clean_delay and not autoclean:
11209                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11210
11211         for x in xrange(len(pkgmap)):
11212                 for y in pkgmap[x]["selected"]:
11213                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11214                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11215                         mysplit = y.split("/")
11216                         #unmerge...
11217                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11218                                 mysettings, unmerge_action not in ["clean","prune"],
11219                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11220                                 scheduler=scheduler)
11221
11222                         if retval != os.EX_OK:
11223                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11224                                 if raise_on_error:
11225                                         raise UninstallFailure(retval)
11226                                 sys.exit(retval)
11227                         else:
11228                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
11229                                         sets["world"].cleanPackage(vartree.dbapi, y)
11230                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
11231         if clean_world and hasattr(sets["world"], "remove"):
11232                 for s in root_config.setconfig.active:
11233                         sets["world"].remove(SETPREFIX+s)
11234         return 1
11235
11236 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11237
11238         if os.path.exists("/usr/bin/install-info"):
11239                 out = portage.output.EOutput()
11240                 regen_infodirs=[]
11241                 for z in infodirs:
11242                         if z=='':
11243                                 continue
11244                         inforoot=normpath(root+z)
11245                         if os.path.isdir(inforoot):
11246                                 infomtime = long(os.stat(inforoot).st_mtime)
11247                                 if inforoot not in prev_mtimes or \
11248                                         prev_mtimes[inforoot] != infomtime:
11249                                                 regen_infodirs.append(inforoot)
11250
11251                 if not regen_infodirs:
11252                         portage.writemsg_stdout("\n")
11253                         out.einfo("GNU info directory index is up-to-date.")
11254                 else:
11255                         portage.writemsg_stdout("\n")
11256                         out.einfo("Regenerating GNU info directory index...")
11257
11258                         dir_extensions = ("", ".gz", ".bz2")
11259                         icount=0
11260                         badcount=0
11261                         errmsg = ""
11262                         for inforoot in regen_infodirs:
11263                                 if inforoot=='':
11264                                         continue
11265
11266                                 if not os.path.isdir(inforoot) or \
11267                                         not os.access(inforoot, os.W_OK):
11268                                         continue
11269
11270                                 file_list = os.listdir(inforoot)
11271                                 file_list.sort()
11272                                 dir_file = os.path.join(inforoot, "dir")
11273                                 moved_old_dir = False
11274                                 processed_count = 0
11275                                 for x in file_list:
11276                                         if x.startswith(".") or \
11277                                                 os.path.isdir(os.path.join(inforoot, x)):
11278                                                 continue
11279                                         if x.startswith("dir"):
11280                                                 skip = False
11281                                                 for ext in dir_extensions:
11282                                                         if x == "dir" + ext or \
11283                                                                 x == "dir" + ext + ".old":
11284                                                                 skip = True
11285                                                                 break
11286                                                 if skip:
11287                                                         continue
11288                                         if processed_count == 0:
11289                                                 for ext in dir_extensions:
11290                                                         try:
11291                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
11292                                                                 moved_old_dir = True
11293                                                         except EnvironmentError, e:
11294                                                                 if e.errno != errno.ENOENT:
11295                                                                         raise
11296                                                                 del e
11297                                         processed_count += 1
11298                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11299                                         existsstr="already exists, for file `"
11300                                         if myso!="":
11301                                                 if re.search(existsstr,myso):
11302                                                         # Already exists... Don't increment the count for this.
11303                                                         pass
11304                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
11305                                                         # This info file doesn't contain a DIR-header: install-info produces this
11306                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
11307                                                         # Don't increment the count for this.
11308                                                         pass
11309                                                 else:
11310                                                         badcount=badcount+1
11311                                                         errmsg += myso + "\n"
11312                                         icount=icount+1
11313
11314                                 if moved_old_dir and not os.path.exists(dir_file):
11315                                         # We didn't generate a new dir file, so put the old file
11316                                         # back where it was originally found.
11317                                         for ext in dir_extensions:
11318                                                 try:
11319                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
11320                                                 except EnvironmentError, e:
11321                                                         if e.errno != errno.ENOENT:
11322                                                                 raise
11323                                                         del e
11324
11325                                 # Clean dir.old cruft so that they don't prevent
11326                                 # unmerge of otherwise empty directories.
11327                                 for ext in dir_extensions:
11328                                         try:
11329                                                 os.unlink(dir_file + ext + ".old")
11330                                         except EnvironmentError, e:
11331                                                 if e.errno != errno.ENOENT:
11332                                                         raise
11333                                                 del e
11334
11335                                 #update mtime so we can potentially avoid regenerating.
11336                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11337
11338                         if badcount:
11339                                 out.eerror("Processed %d info files; %d errors." % \
11340                                         (icount, badcount))
11341                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11342                         else:
11343                                 if icount > 0:
11344                                         out.einfo("Processed %d info files." % (icount,))
11345
11346
11347 def display_news_notification(root_config, myopts):
11348         target_root = root_config.root
11349         trees = root_config.trees
11350         settings = trees["vartree"].settings
11351         portdb = trees["porttree"].dbapi
11352         vardb = trees["vartree"].dbapi
11353         NEWS_PATH = os.path.join("metadata", "news")
11354         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11355         newsReaderDisplay = False
11356         update = "--pretend" not in myopts
11357
11358         for repo in portdb.getRepositories():
11359                 unreadItems = checkUpdatedNewsItems(
11360                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11361                 if unreadItems:
11362                         if not newsReaderDisplay:
11363                                 newsReaderDisplay = True
11364                                 print
11365                         print colorize("WARN", " * IMPORTANT:"),
11366                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11367                         
11368         
11369         if newsReaderDisplay:
11370                 print colorize("WARN", " *"),
11371                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11372                 print
11373
11374 def display_preserved_libs(vardbapi):
11375         MAX_DISPLAY = 3
11376
11377         # Ensure the registry is consistent with existing files.
11378         vardbapi.plib_registry.pruneNonExisting()
11379
11380         if vardbapi.plib_registry.hasEntries():
11381                 print
11382                 print colorize("WARN", "!!!") + " existing preserved libs:"
11383                 plibdata = vardbapi.plib_registry.getPreservedLibs()
11384                 linkmap = vardbapi.linkmap
11385                 consumer_map = {}
11386                 owners = {}
11387                 linkmap_broken = False
11388
11389                 try:
11390                         linkmap.rebuild()
11391                 except portage.exception.CommandNotFound, e:
11392                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
11393                                 level=logging.ERROR, noiselevel=-1)
11394                         del e
11395                         linkmap_broken = True
11396                 else:
11397                         search_for_owners = set()
11398                         for cpv in plibdata:
11399                                 for f in plibdata[cpv]:
11400                                         if f in consumer_map:
11401                                                 continue
11402                                         consumers = list(linkmap.findConsumers(f))
11403                                         consumers.sort()
11404                                         consumer_map[f] = consumers
11405                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
11406
11407                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
11408
11409                 for cpv in plibdata:
11410                         print colorize("WARN", ">>>") + " package: %s" % cpv
11411                         samefile_map = {}
11412                         for f in plibdata[cpv]:
11413                                 obj_key = linkmap._obj_key(f)
11414                                 alt_paths = samefile_map.get(obj_key)
11415                                 if alt_paths is None:
11416                                         alt_paths = set()
11417                                         samefile_map[obj_key] = alt_paths
11418                                 alt_paths.add(f)
11419
11420                         for alt_paths in samefile_map.itervalues():
11421                                 alt_paths = sorted(alt_paths)
11422                                 for p in alt_paths:
11423                                         print colorize("WARN", " * ") + " - %s" % (p,)
11424                                 f = alt_paths[0]
11425                                 consumers = consumer_map.get(f, [])
11426                                 for c in consumers[:MAX_DISPLAY]:
11427                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11428                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
11429                                 if len(consumers) == MAX_DISPLAY + 1:
11430                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11431                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11432                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
11433                                 elif len(consumers) > MAX_DISPLAY:
11434                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
11435                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11436
11437
11438 def _flush_elog_mod_echo():
11439         """
11440         Dump the mod_echo output now so that our other
11441         notifications are shown last.
11442         @rtype: bool
11443         @returns: True if messages were shown, False otherwise.
11444         """
11445         messages_shown = False
11446         try:
11447                 from portage.elog import mod_echo
11448         except ImportError:
11449                 pass # happens during downgrade to a version without the module
11450         else:
11451                 messages_shown = bool(mod_echo._items)
11452                 mod_echo.finalize()
11453         return messages_shown
11454
11455 def post_emerge(root_config, myopts, mtimedb, retval):
11456         """
11457         Misc. things to run at the end of a merge session.
11458         
11459         Update Info Files
11460         Update Config Files
11461         Update News Items
11462         Commit mtimeDB
11463         Display preserved libs warnings
11464         Exit Emerge
11465
11466         @param trees: A dictionary mapping each ROOT to it's package databases
11467         @type trees: dict
11468         @param mtimedb: The mtimeDB to store data needed across merge invocations
11469         @type mtimedb: MtimeDB class instance
11470         @param retval: Emerge's return value
11471         @type retval: Int
11472         @rype: None
11473         @returns:
11474         1.  Calls sys.exit(retval)
11475         """
11476
11477         target_root = root_config.root
11478         trees = { target_root : root_config.trees }
11479         vardbapi = trees[target_root]["vartree"].dbapi
11480         settings = vardbapi.settings
11481         info_mtimes = mtimedb["info"]
11482
11483         # Load the most current variables from ${ROOT}/etc/profile.env
11484         settings.unlock()
11485         settings.reload()
11486         settings.regenerate()
11487         settings.lock()
11488
11489         config_protect = settings.get("CONFIG_PROTECT","").split()
11490         infodirs = settings.get("INFOPATH","").split(":") + \
11491                 settings.get("INFODIR","").split(":")
11492
11493         os.chdir("/")
11494
11495         if retval == os.EX_OK:
11496                 exit_msg = " *** exiting successfully."
11497         else:
11498                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11499         emergelog("notitles" not in settings.features, exit_msg)
11500
11501         _flush_elog_mod_echo()
11502
11503         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11504         if counter_hash is not None and \
11505                 counter_hash == vardbapi._counter_hash():
11506                 # If vdb state has not changed then there's nothing else to do.
11507                 sys.exit(retval)
11508
11509         vdb_path = os.path.join(target_root, portage.VDB_PATH)
11510         portage.util.ensure_dirs(vdb_path)
11511         vdb_lock = None
11512         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11513                 vdb_lock = portage.locks.lockdir(vdb_path)
11514
11515         if vdb_lock:
11516                 try:
11517                         if "noinfo" not in settings.features:
11518                                 chk_updated_info_files(target_root,
11519                                         infodirs, info_mtimes, retval)
11520                         mtimedb.commit()
11521                 finally:
11522                         if vdb_lock:
11523                                 portage.locks.unlockdir(vdb_lock)
11524
11525         chk_updated_cfg_files(target_root, config_protect)
11526         
11527         display_news_notification(root_config, myopts)
11528         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11529                 display_preserved_libs(vardbapi)        
11530
11531         sys.exit(retval)
11532
11533
11534 def chk_updated_cfg_files(target_root, config_protect):
11535         if config_protect:
11536                 #number of directories with some protect files in them
11537                 procount=0
11538                 for x in config_protect:
11539                         x = os.path.join(target_root, x.lstrip(os.path.sep))
11540                         if not os.access(x, os.W_OK):
11541                                 # Avoid Permission denied errors generated
11542                                 # later by `find`.
11543                                 continue
11544                         try:
11545                                 mymode = os.lstat(x).st_mode
11546                         except OSError:
11547                                 continue
11548                         if stat.S_ISLNK(mymode):
11549                                 # We want to treat it like a directory if it
11550                                 # is a symlink to an existing directory.
11551                                 try:
11552                                         real_mode = os.stat(x).st_mode
11553                                         if stat.S_ISDIR(real_mode):
11554                                                 mymode = real_mode
11555                                 except OSError:
11556                                         pass
11557                         if stat.S_ISDIR(mymode):
11558                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11559                         else:
11560                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11561                                         os.path.split(x.rstrip(os.path.sep))
11562                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11563                         a = commands.getstatusoutput(mycommand)
11564                         if a[0] != 0:
11565                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11566                                 sys.stderr.flush()
11567                                 # Show the error message alone, sending stdout to /dev/null.
11568                                 os.system(mycommand + " 1>/dev/null")
11569                         else:
11570                                 files = a[1].split('\0')
11571                                 # split always produces an empty string as the last element
11572                                 if files and not files[-1]:
11573                                         del files[-1]
11574                                 if files:
11575                                         procount += 1
11576                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
11577                                         if stat.S_ISDIR(mymode):
11578                                                  print "%d config files in '%s' need updating." % \
11579                                                         (len(files), x)
11580                                         else:
11581                                                  print "config file '%s' needs updating." % x
11582
11583                 if procount:
11584                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11585                                 " section of the " + bold("emerge")
11586                         print " "+yellow("*")+" man page to learn how to update config files."
11587
11588 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11589         update=False):
11590         """
11591         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11592         Returns the number of unread (yet relevent) items.
11593         
11594         @param portdb: a portage tree database
11595         @type portdb: pordbapi
11596         @param vardb: an installed package database
11597         @type vardb: vardbapi
11598         @param NEWS_PATH:
11599         @type NEWS_PATH:
11600         @param UNREAD_PATH:
11601         @type UNREAD_PATH:
11602         @param repo_id:
11603         @type repo_id:
11604         @rtype: Integer
11605         @returns:
11606         1.  The number of unread but relevant news items.
11607         
11608         """
11609         from portage.news import NewsManager
11610         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11611         return manager.getUnreadItems( repo_id, update=update )
11612
11613 def insert_category_into_atom(atom, category):
11614         alphanum = re.search(r'\w', atom)
11615         if alphanum:
11616                 ret = atom[:alphanum.start()] + "%s/" % category + \
11617                         atom[alphanum.start():]
11618         else:
11619                 ret = None
11620         return ret
11621
11622 def is_valid_package_atom(x):
11623         if "/" not in x:
11624                 alphanum = re.search(r'\w', x)
11625                 if alphanum:
11626                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11627         return portage.isvalidatom(x)
11628
11629 def show_blocker_docs_link():
11630         print
11631         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11632         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11633         print
11634         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11635         print
11636
11637 def show_mask_docs():
11638         print "For more information, see the MASKED PACKAGES section in the emerge"
11639         print "man page or refer to the Gentoo Handbook."
11640
11641 def action_sync(settings, trees, mtimedb, myopts, myaction):
11642         xterm_titles = "notitles" not in settings.features
11643         emergelog(xterm_titles, " === sync")
11644         myportdir = settings.get("PORTDIR", None)
11645         out = portage.output.EOutput()
11646         if not myportdir:
11647                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
11648                 sys.exit(1)
11649         if myportdir[-1]=="/":
11650                 myportdir=myportdir[:-1]
11651         if not os.path.exists(myportdir):
11652                 print ">>>",myportdir,"not found, creating it."
11653                 os.makedirs(myportdir,0755)
11654         syncuri = settings.get("SYNC", "").strip()
11655         if not syncuri:
11656                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11657                         noiselevel=-1, level=logging.ERROR)
11658                 return 1
11659
11660         os.umask(0022)
11661         updatecache_flg = False
11662         if myaction == "metadata":
11663                 print "skipping sync"
11664                 updatecache_flg = True
11665         elif syncuri[:8]=="rsync://":
11666                 if not os.path.exists("/usr/bin/rsync"):
11667                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11668                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11669                         sys.exit(1)
11670                 mytimeout=180
11671
11672                 rsync_opts = []
11673                 import shlex, StringIO
11674                 if settings["PORTAGE_RSYNC_OPTS"] == "":
11675                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11676                         rsync_opts.extend([
11677                                 "--recursive",    # Recurse directories
11678                                 "--links",        # Consider symlinks
11679                                 "--safe-links",   # Ignore links outside of tree
11680                                 "--perms",        # Preserve permissions
11681                                 "--times",        # Preserive mod times
11682                                 "--compress",     # Compress the data transmitted
11683                                 "--force",        # Force deletion on non-empty dirs
11684                                 "--whole-file",   # Don't do block transfers, only entire files
11685                                 "--delete",       # Delete files that aren't in the master tree
11686                                 "--stats",        # Show final statistics about what was transfered
11687                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11688                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
11689                                 "--exclude=/local",       # Exclude local     from consideration
11690                                 "--exclude=/packages",    # Exclude packages  from consideration
11691                         ])
11692
11693                 else:
11694                         # The below validation is not needed when using the above hardcoded
11695                         # defaults.
11696
11697                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11698                         lexer = shlex.shlex(StringIO.StringIO(
11699                                 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11700                         lexer.whitespace_split = True
11701                         rsync_opts.extend(lexer)
11702                         del lexer
11703
11704                         for opt in ("--recursive", "--times"):
11705                                 if opt not in rsync_opts:
11706                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
11707                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11708                                         rsync_opts.append(opt)
11709         
11710                         for exclude in ("distfiles", "local", "packages"):
11711                                 opt = "--exclude=/%s" % exclude
11712                                 if opt not in rsync_opts:
11713                                         portage.writemsg(yellow("WARNING:") + \
11714                                         " adding required option %s not included in "  % opt + \
11715                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11716                                         rsync_opts.append(opt)
11717         
11718                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11719                                 def rsync_opt_startswith(opt_prefix):
11720                                         for x in rsync_opts:
11721                                                 if x.startswith(opt_prefix):
11722                                                         return True
11723                                         return False
11724
11725                                 if not rsync_opt_startswith("--timeout="):
11726                                         rsync_opts.append("--timeout=%d" % mytimeout)
11727
11728                                 for opt in ("--compress", "--whole-file"):
11729                                         if opt not in rsync_opts:
11730                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11731                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11732                                                 rsync_opts.append(opt)
11733
11734                 if "--quiet" in myopts:
11735                         rsync_opts.append("--quiet")    # Shut up a lot
11736                 else:
11737                         rsync_opts.append("--verbose")  # Print filelist
11738
11739                 if "--verbose" in myopts:
11740                         rsync_opts.append("--progress")  # Progress meter for each file
11741
11742                 if "--debug" in myopts:
11743                         rsync_opts.append("--checksum") # Force checksum on all files
11744
11745                 # Real local timestamp file.
11746                 servertimestampfile = os.path.join(
11747                         myportdir, "metadata", "timestamp.chk")
11748
11749                 content = portage.util.grabfile(servertimestampfile)
11750                 mytimestamp = 0
11751                 if content:
11752                         try:
11753                                 mytimestamp = time.mktime(time.strptime(content[0],
11754                                         "%a, %d %b %Y %H:%M:%S +0000"))
11755                         except (OverflowError, ValueError):
11756                                 pass
11757                 del content
11758
11759                 try:
11760                         rsync_initial_timeout = \
11761                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11762                 except ValueError:
11763                         rsync_initial_timeout = 15
11764
11765                 try:
11766                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11767                 except SystemExit, e:
11768                         raise # Needed else can't exit
11769                 except:
11770                         maxretries=3 #default number of retries
11771
11772                 retries=0
11773                 user_name, hostname, port = re.split(
11774                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11775                 if port is None:
11776                         port=""
11777                 if user_name is None:
11778                         user_name=""
11779                 updatecache_flg=True
11780                 all_rsync_opts = set(rsync_opts)
11781                 lexer = shlex.shlex(StringIO.StringIO(
11782                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11783                 lexer.whitespace_split = True
11784                 extra_rsync_opts = list(lexer)
11785                 del lexer
11786                 all_rsync_opts.update(extra_rsync_opts)
11787                 family = socket.AF_INET
11788                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11789                         family = socket.AF_INET
11790                 elif socket.has_ipv6 and \
11791                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11792                         family = socket.AF_INET6
11793                 ips=[]
11794                 SERVER_OUT_OF_DATE = -1
11795                 EXCEEDED_MAX_RETRIES = -2
11796                 while (1):
11797                         if ips:
11798                                 del ips[0]
11799                         if ips==[]:
11800                                 try:
11801                                         for addrinfo in socket.getaddrinfo(
11802                                                 hostname, None, family, socket.SOCK_STREAM):
11803                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11804                                                         # IPv6 addresses need to be enclosed in square brackets
11805                                                         ips.append("[%s]" % addrinfo[4][0])
11806                                                 else:
11807                                                         ips.append(addrinfo[4][0])
11808                                         from random import shuffle
11809                                         shuffle(ips)
11810                                 except SystemExit, e:
11811                                         raise # Needed else can't exit
11812                                 except Exception, e:
11813                                         print "Notice:",str(e)
11814                                         dosyncuri=syncuri
11815
11816                         if ips:
11817                                 try:
11818                                         dosyncuri = syncuri.replace(
11819                                                 "//" + user_name + hostname + port + "/",
11820                                                 "//" + user_name + ips[0] + port + "/", 1)
11821                                 except SystemExit, e:
11822                                         raise # Needed else can't exit
11823                                 except Exception, e:
11824                                         print "Notice:",str(e)
11825                                         dosyncuri=syncuri
11826
11827                         if (retries==0):
11828                                 if "--ask" in myopts:
11829                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11830                                                 print
11831                                                 print "Quitting."
11832                                                 print
11833                                                 sys.exit(0)
11834                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11835                                 if "--quiet" not in myopts:
11836                                         print ">>> Starting rsync with "+dosyncuri+"..."
11837                         else:
11838                                 emergelog(xterm_titles,
11839                                         ">>> Starting retry %d of %d with %s" % \
11840                                                 (retries,maxretries,dosyncuri))
11841                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11842
11843                         if mytimestamp != 0 and "--quiet" not in myopts:
11844                                 print ">>> Checking server timestamp ..."
11845
11846                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11847
11848                         if "--debug" in myopts:
11849                                 print rsynccommand
11850
11851                         exitcode = os.EX_OK
11852                         servertimestamp = 0
11853                         # Even if there's no timestamp available locally, fetch the
11854                         # timestamp anyway as an initial probe to verify that the server is
11855                         # responsive.  This protects us from hanging indefinitely on a
11856                         # connection attempt to an unresponsive server which rsync's
11857                         # --timeout option does not prevent.
11858                         if True:
11859                                 # Temporary file for remote server timestamp comparison.
11860                                 from tempfile import mkstemp
11861                                 fd, tmpservertimestampfile = mkstemp()
11862                                 os.close(fd)
11863                                 mycommand = rsynccommand[:]
11864                                 mycommand.append(dosyncuri.rstrip("/") + \
11865                                         "/metadata/timestamp.chk")
11866                                 mycommand.append(tmpservertimestampfile)
11867                                 content = None
11868                                 mypids = []
11869                                 try:
11870                                         def timeout_handler(signum, frame):
11871                                                 raise portage.exception.PortageException("timed out")
11872                                         signal.signal(signal.SIGALRM, timeout_handler)
11873                                         # Timeout here in case the server is unresponsive.  The
11874                                         # --timeout rsync option doesn't apply to the initial
11875                                         # connection attempt.
11876                                         if rsync_initial_timeout:
11877                                                 signal.alarm(rsync_initial_timeout)
11878                                         try:
11879                                                 mypids.extend(portage.process.spawn(
11880                                                         mycommand, env=settings.environ(), returnpid=True))
11881                                                 exitcode = os.waitpid(mypids[0], 0)[1]
11882                                                 content = portage.grabfile(tmpservertimestampfile)
11883                                         finally:
11884                                                 if rsync_initial_timeout:
11885                                                         signal.alarm(0)
11886                                                 try:
11887                                                         os.unlink(tmpservertimestampfile)
11888                                                 except OSError:
11889                                                         pass
11890                                 except portage.exception.PortageException, e:
11891                                         # timed out
11892                                         print e
11893                                         del e
11894                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11895                                                 os.kill(mypids[0], signal.SIGTERM)
11896                                                 os.waitpid(mypids[0], 0)
11897                                         # This is the same code rsync uses for timeout.
11898                                         exitcode = 30
11899                                 else:
11900                                         if exitcode != os.EX_OK:
11901                                                 if exitcode & 0xff:
11902                                                         exitcode = (exitcode & 0xff) << 8
11903                                                 else:
11904                                                         exitcode = exitcode >> 8
11905                                 if mypids:
11906                                         portage.process.spawned_pids.remove(mypids[0])
11907                                 if content:
11908                                         try:
11909                                                 servertimestamp = time.mktime(time.strptime(
11910                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11911                                         except (OverflowError, ValueError):
11912                                                 pass
11913                                 del mycommand, mypids, content
11914                         if exitcode == os.EX_OK:
11915                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11916                                         emergelog(xterm_titles,
11917                                                 ">>> Cancelling sync -- Already current.")
11918                                         print
11919                                         print ">>>"
11920                                         print ">>> Timestamps on the server and in the local repository are the same."
11921                                         print ">>> Cancelling all further sync action. You are already up to date."
11922                                         print ">>>"
11923                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
11924                                         print ">>>"
11925                                         print
11926                                         sys.exit(0)
11927                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11928                                         emergelog(xterm_titles,
11929                                                 ">>> Server out of date: %s" % dosyncuri)
11930                                         print
11931                                         print ">>>"
11932                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11933                                         print ">>>"
11934                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
11935                                         print ">>>"
11936                                         print
11937                                         exitcode = SERVER_OUT_OF_DATE
11938                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11939                                         # actual sync
11940                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11941                                         exitcode = portage.process.spawn(mycommand,
11942                                                 env=settings.environ())
11943                                         if exitcode in [0,1,3,4,11,14,20,21]:
11944                                                 break
11945                         elif exitcode in [1,3,4,11,14,20,21]:
11946                                 break
11947                         else:
11948                                 # Code 2 indicates protocol incompatibility, which is expected
11949                                 # for servers with protocol < 29 that don't support
11950                                 # --prune-empty-directories.  Retry for a server that supports
11951                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
11952                                 pass
11953
11954                         retries=retries+1
11955
11956                         if retries<=maxretries:
11957                                 print ">>> Retrying..."
11958                                 time.sleep(11)
11959                         else:
11960                                 # over retries
11961                                 # exit loop
11962                                 updatecache_flg=False
11963                                 exitcode = EXCEEDED_MAX_RETRIES
11964                                 break
11965
11966                 if (exitcode==0):
11967                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11968                 elif exitcode == SERVER_OUT_OF_DATE:
11969                         sys.exit(1)
11970                 elif exitcode == EXCEEDED_MAX_RETRIES:
11971                         sys.stderr.write(
11972                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
11973                         sys.exit(1)
11974                 elif (exitcode>0):
11975                         msg = []
11976                         if exitcode==1:
11977                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11978                                 msg.append("that your SYNC statement is proper.")
11979                                 msg.append("SYNC=" + settings["SYNC"])
11980                         elif exitcode==11:
11981                                 msg.append("Rsync has reported that there is a File IO error. Normally")
11982                                 msg.append("this means your disk is full, but can be caused by corruption")
11983                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11984                                 msg.append("and try again after the problem has been fixed.")
11985                                 msg.append("PORTDIR=" + settings["PORTDIR"])
11986                         elif exitcode==20:
11987                                 msg.append("Rsync was killed before it finished.")
11988                         else:
11989                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11990                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11991                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11992                                 msg.append("temporary problem unless complications exist with your network")
11993                                 msg.append("(and possibly your system's filesystem) configuration.")
11994                         for line in msg:
11995                                 out.eerror(line)
11996                         sys.exit(exitcode)
11997         elif syncuri[:6]=="cvs://":
11998                 if not os.path.exists("/usr/bin/cvs"):
11999                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12000                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12001                         sys.exit(1)
12002                 cvsroot=syncuri[6:]
12003                 cvsdir=os.path.dirname(myportdir)
12004                 if not os.path.exists(myportdir+"/CVS"):
12005                         #initial checkout
12006                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12007                         if os.path.exists(cvsdir+"/gentoo-x86"):
12008                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12009                                 sys.exit(1)
12010                         try:
12011                                 os.rmdir(myportdir)
12012                         except OSError, e:
12013                                 if e.errno != errno.ENOENT:
12014                                         sys.stderr.write(
12015                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12016                                         sys.exit(1)
12017                                 del e
12018                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12019                                 print "!!! cvs checkout error; exiting."
12020                                 sys.exit(1)
12021                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12022                 else:
12023                         #cvs update
12024                         print ">>> Starting cvs update with "+syncuri+"..."
12025                         retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
12026                                 myportdir, settings, free=1)
12027                         if retval != os.EX_OK:
12028                                 sys.exit(retval)
12029                 dosyncuri = syncuri
12030         else:
12031                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12032                         noiselevel=-1, level=logging.ERROR)
12033                 return 1
12034
12035         if updatecache_flg and  \
12036                 myaction != "metadata" and \
12037                 "metadata-transfer" not in settings.features:
12038                 updatecache_flg = False
12039
12040         # Reload the whole config from scratch.
12041         settings, trees, mtimedb = load_emerge_config(trees=trees)
12042         root_config = trees[settings["ROOT"]]["root_config"]
12043         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12044
12045         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12046                 action_metadata(settings, portdb, myopts)
12047
12048         if portage._global_updates(trees, mtimedb["updates"]):
12049                 mtimedb.commit()
12050                 # Reload the whole config from scratch.
12051                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12052                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12053                 root_config = trees[settings["ROOT"]]["root_config"]
12054
12055         mybestpv = portdb.xmatch("bestmatch-visible",
12056                 portage.const.PORTAGE_PACKAGE_ATOM)
12057         mypvs = portage.best(
12058                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12059                 portage.const.PORTAGE_PACKAGE_ATOM))
12060
12061         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12062
12063         if myaction != "metadata":
12064                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12065                         retval = portage.process.spawn(
12066                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12067                                 dosyncuri], env=settings.environ())
12068                         if retval != os.EX_OK:
12069                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12070
12071         if(mybestpv != mypvs) and not "--quiet" in myopts:
12072                 print
12073                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12074                 print red(" * ")+"that you update portage now, before any other packages are updated."
12075                 print
12076                 print red(" * ")+"To update portage, run 'emerge portage' now."
12077                 print
12078         
12079         display_news_notification(root_config, myopts)
12080         return os.EX_OK
12081
12082 def action_metadata(settings, portdb, myopts):
12083         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
12084         old_umask = os.umask(0002)
12085         cachedir = os.path.normpath(settings.depcachedir)
12086         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
12087                                         "/lib", "/opt", "/proc", "/root", "/sbin",
12088                                         "/sys", "/tmp", "/usr",  "/var"]:
12089                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12090                         "ROOT DIRECTORY ON YOUR SYSTEM."
12091                 print >> sys.stderr, \
12092                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12093                 sys.exit(73)
12094         if not os.path.exists(cachedir):
12095                 os.mkdir(cachedir)
12096
12097         ec = portage.eclass_cache.cache(portdb.porttree_root)
12098         myportdir = os.path.realpath(settings["PORTDIR"])
12099         cm = settings.load_best_module("portdbapi.metadbmodule")(
12100                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12101
12102         from portage.cache import util
12103
12104         class percentage_noise_maker(util.quiet_mirroring):
12105                 def __init__(self, dbapi):
12106                         self.dbapi = dbapi
12107                         self.cp_all = dbapi.cp_all()
12108                         l = len(self.cp_all)
12109                         self.call_update_min = 100000000
12110                         self.min_cp_all = l/100.0
12111                         self.count = 1
12112                         self.pstr = ''
12113
12114                 def __iter__(self):
12115                         for x in self.cp_all:
12116                                 self.count += 1
12117                                 if self.count > self.min_cp_all:
12118                                         self.call_update_min = 0
12119                                         self.count = 0
12120                                 for y in self.dbapi.cp_list(x):
12121                                         yield y
12122                         self.call_update_mine = 0
12123
12124                 def update(self, *arg):
12125                         try:                            self.pstr = int(self.pstr) + 1
12126                         except ValueError:      self.pstr = 1
12127                         sys.stdout.write("%s%i%%" % \
12128                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
12129                         sys.stdout.flush()
12130                         self.call_update_min = 10000000
12131
12132                 def finish(self, *arg):
12133                         sys.stdout.write("\b\b\b\b100%\n")
12134                         sys.stdout.flush()
12135
12136         if "--quiet" in myopts:
12137                 def quicky_cpv_generator(cp_all_list):
12138                         for x in cp_all_list:
12139                                 for y in portdb.cp_list(x):
12140                                         yield y
12141                 source = quicky_cpv_generator(portdb.cp_all())
12142                 noise_maker = portage.cache.util.quiet_mirroring()
12143         else:
12144                 noise_maker = source = percentage_noise_maker(portdb)
12145         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12146                 eclass_cache=ec, verbose_instance=noise_maker)
12147
12148         sys.stdout.flush()
12149         os.umask(old_umask)
12150
12151 def action_regen(settings, portdb, max_jobs, max_load):
12152         xterm_titles = "notitles" not in settings.features
12153         emergelog(xterm_titles, " === regen")
12154         #regenerate cache entries
12155         portage.writemsg_stdout("Regenerating cache entries...\n")
12156         try:
12157                 os.close(sys.stdin.fileno())
12158         except SystemExit, e:
12159                 raise # Needed else can't exit
12160         except:
12161                 pass
12162         sys.stdout.flush()
12163
12164         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12165         regen.run()
12166
12167         portage.writemsg_stdout("done!\n")
12168
12169 def action_config(settings, trees, myopts, myfiles):
12170         if len(myfiles) != 1:
12171                 print red("!!! config can only take a single package atom at this time\n")
12172                 sys.exit(1)
12173         if not is_valid_package_atom(myfiles[0]):
12174                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12175                         noiselevel=-1)
12176                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12177                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12178                 sys.exit(1)
12179         print
12180         try:
12181                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12182         except portage.exception.AmbiguousPackageName, e:
12183                 # Multiple matches thrown from cpv_expand
12184                 pkgs = e.args[0]
12185         if len(pkgs) == 0:
12186                 print "No packages found.\n"
12187                 sys.exit(0)
12188         elif len(pkgs) > 1:
12189                 if "--ask" in myopts:
12190                         options = []
12191                         print "Please select a package to configure:"
12192                         idx = 0
12193                         for pkg in pkgs:
12194                                 idx += 1
12195                                 options.append(str(idx))
12196                                 print options[-1]+") "+pkg
12197                         print "X) Cancel"
12198                         options.append("X")
12199                         idx = userquery("Selection?", options)
12200                         if idx == "X":
12201                                 sys.exit(0)
12202                         pkg = pkgs[int(idx)-1]
12203                 else:
12204                         print "The following packages available:"
12205                         for pkg in pkgs:
12206                                 print "* "+pkg
12207                         print "\nPlease use a specific atom or the --ask option."
12208                         sys.exit(1)
12209         else:
12210                 pkg = pkgs[0]
12211
12212         print
12213         if "--ask" in myopts:
12214                 if userquery("Ready to configure "+pkg+"?") == "No":
12215                         sys.exit(0)
12216         else:
12217                 print "Configuring pkg..."
12218         print
12219         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12220         mysettings = portage.config(clone=settings)
12221         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12222         debug = mysettings.get("PORTAGE_DEBUG") == "1"
12223         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12224                 mysettings,
12225                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12226                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12227         if retval == os.EX_OK:
12228                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12229                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12230         print
12231
12232 def action_info(settings, trees, myopts, myfiles):
12233         print getportageversion(settings["PORTDIR"], settings["ROOT"],
12234                 settings.profile_path, settings["CHOST"],
12235                 trees[settings["ROOT"]]["vartree"].dbapi)
12236         header_width = 65
12237         header_title = "System Settings"
12238         if myfiles:
12239                 print header_width * "="
12240                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12241         print header_width * "="
12242         print "System uname: "+platform.platform(aliased=1)
12243
12244         lastSync = portage.grabfile(os.path.join(
12245                 settings["PORTDIR"], "metadata", "timestamp.chk"))
12246         print "Timestamp of tree:",
12247         if lastSync:
12248                 print lastSync[0]
12249         else:
12250                 print "Unknown"
12251
12252         output=commands.getstatusoutput("distcc --version")
12253         if not output[0]:
12254                 print str(output[1].split("\n",1)[0]),
12255                 if "distcc" in settings.features:
12256                         print "[enabled]"
12257                 else:
12258                         print "[disabled]"
12259
12260         output=commands.getstatusoutput("ccache -V")
12261         if not output[0]:
12262                 print str(output[1].split("\n",1)[0]),
12263                 if "ccache" in settings.features:
12264                         print "[enabled]"
12265                 else:
12266                         print "[disabled]"
12267
12268         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12269                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
12270         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12271         myvars  = portage.util.unique_array(myvars)
12272         myvars.sort()
12273
12274         for x in myvars:
12275                 if portage.isvalidatom(x):
12276                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12277                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12278                         pkg_matches.sort(portage.pkgcmp)
12279                         pkgs = []
12280                         for pn, ver, rev in pkg_matches:
12281                                 if rev != "r0":
12282                                         pkgs.append(ver + "-" + rev)
12283                                 else:
12284                                         pkgs.append(ver)
12285                         if pkgs:
12286                                 pkgs = ", ".join(pkgs)
12287                                 print "%-20s %s" % (x+":", pkgs)
12288                 else:
12289                         print "%-20s %s" % (x+":", "[NOT VALID]")
12290
12291         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12292
12293         if "--verbose" in myopts:
12294                 myvars=settings.keys()
12295         else:
12296                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12297                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12298                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12299                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12300
12301                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12302
12303         myvars = portage.util.unique_array(myvars)
12304         unset_vars = []
12305         myvars.sort()
12306         for x in myvars:
12307                 if x in settings:
12308                         if x != "USE":
12309                                 print '%s="%s"' % (x, settings[x])
12310                         else:
12311                                 use = set(settings["USE"].split())
12312                                 use_expand = settings["USE_EXPAND"].split()
12313                                 use_expand.sort()
12314                                 for varname in use_expand:
12315                                         flag_prefix = varname.lower() + "_"
12316                                         for f in list(use):
12317                                                 if f.startswith(flag_prefix):
12318                                                         use.remove(f)
12319                                 use = list(use)
12320                                 use.sort()
12321                                 print 'USE="%s"' % " ".join(use),
12322                                 for varname in use_expand:
12323                                         myval = settings.get(varname)
12324                                         if myval:
12325                                                 print '%s="%s"' % (varname, myval),
12326                                 print
12327                 else:
12328                         unset_vars.append(x)
12329         if unset_vars:
12330                 print "Unset:  "+", ".join(unset_vars)
12331         print
12332
12333         if "--debug" in myopts:
12334                 for x in dir(portage):
12335                         module = getattr(portage, x)
12336                         if "cvs_id_string" in dir(module):
12337                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
12338
12339         # See if we can find any packages installed matching the strings
12340         # passed on the command line
12341         mypkgs = []
12342         vardb = trees[settings["ROOT"]]["vartree"].dbapi
12343         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12344         for x in myfiles:
12345                 mypkgs.extend(vardb.match(x))
12346
12347         # If some packages were found...
12348         if mypkgs:
12349                 # Get our global settings (we only print stuff if it varies from
12350                 # the current config)
12351                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12352                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12353                 global_vals = {}
12354                 pkgsettings = portage.config(clone=settings)
12355
12356                 for myvar in mydesiredvars:
12357                         global_vals[myvar] = set(settings.get(myvar, "").split())
12358
12359                 # Loop through each package
12360                 # Only print settings if they differ from global settings
12361                 header_title = "Package Settings"
12362                 print header_width * "="
12363                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12364                 print header_width * "="
12365                 from portage.output import EOutput
12366                 out = EOutput()
12367                 for pkg in mypkgs:
12368                         # Get all package specific variables
12369                         auxvalues = vardb.aux_get(pkg, auxkeys)
12370                         valuesmap = {}
12371                         for i in xrange(len(auxkeys)):
12372                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12373                         diff_values = {}
12374                         for myvar in mydesiredvars:
12375                                 # If the package variable doesn't match the
12376                                 # current global variable, something has changed
12377                                 # so set diff_found so we know to print
12378                                 if valuesmap[myvar] != global_vals[myvar]:
12379                                         diff_values[myvar] = valuesmap[myvar]
12380                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12381                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12382                         pkgsettings.reset()
12383                         # If a matching ebuild is no longer available in the tree, maybe it
12384                         # would make sense to compare against the flags for the best
12385                         # available version with the same slot?
12386                         mydb = None
12387                         if portdb.cpv_exists(pkg):
12388                                 mydb = portdb
12389                         pkgsettings.setcpv(pkg, mydb=mydb)
12390                         if valuesmap["IUSE"].intersection(
12391                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12392                                 diff_values["USE"] = valuesmap["USE"]
12393                         # If a difference was found, print the info for
12394                         # this package.
12395                         if diff_values:
12396                                 # Print package info
12397                                 print "%s was built with the following:" % pkg
12398                                 for myvar in mydesiredvars + ["USE"]:
12399                                         if myvar in diff_values:
12400                                                 mylist = list(diff_values[myvar])
12401                                                 mylist.sort()
12402                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12403                                 print
12404                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
12405                         ebuildpath = vardb.findname(pkg)
12406                         if not ebuildpath or not os.path.exists(ebuildpath):
12407                                 out.ewarn("No ebuild found for '%s'" % pkg)
12408                                 continue
12409                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12410                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12411                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12412                                 tree="vartree")
12413
12414 def action_search(root_config, myopts, myfiles, spinner):
12415         if not myfiles:
12416                 print "emerge: no search terms provided."
12417         else:
12418                 searchinstance = search(root_config,
12419                         spinner, "--searchdesc" in myopts,
12420                         "--quiet" not in myopts, "--usepkg" in myopts,
12421                         "--usepkgonly" in myopts)
12422                 for mysearch in myfiles:
12423                         try:
12424                                 searchinstance.execute(mysearch)
12425                         except re.error, comment:
12426                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12427                                 sys.exit(1)
12428                         searchinstance.output()
12429
12430 def action_depclean(settings, trees, ldpath_mtimes,
12431         myopts, action, myfiles, spinner):
12432         # Kill packages that aren't explicitly merged or are required as a
12433         # dependency of another package. World file is explicit.
12434
12435         # Global depclean or prune operations are not very safe when there are
12436         # missing dependencies since it's unknown how badly incomplete
12437         # the dependency graph is, and we might accidentally remove packages
12438         # that should have been pulled into the graph. On the other hand, it's
12439         # relatively safe to ignore missing deps when only asked to remove
12440         # specific packages.
12441         allow_missing_deps = len(myfiles) > 0
12442
12443         msg = []
12444         msg.append("Always study the list of packages to be cleaned for any obvious\n")
12445         msg.append("mistakes. Packages that are part of the world set will always\n")
12446         msg.append("be kept.  They can be manually added to this set with\n")
12447         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
12448         msg.append("package.provided (see portage(5)) will be removed by\n")
12449         msg.append("depclean, even if they are part of the world set.\n")
12450         msg.append("\n")
12451         msg.append("As a safety measure, depclean will not remove any packages\n")
12452         msg.append("unless *all* required dependencies have been resolved.  As a\n")
12453         msg.append("consequence, it is often necessary to run %s\n" % \
12454                 good("`emerge --update"))
12455         msg.append(good("--newuse --deep @system @world`") + \
12456                 " prior to depclean.\n")
12457
12458         if action == "depclean" and "--quiet" not in myopts and not myfiles:
12459                 portage.writemsg_stdout("\n")
12460                 for x in msg:
12461                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
12462
12463         xterm_titles = "notitles" not in settings.features
12464         myroot = settings["ROOT"]
12465         root_config = trees[myroot]["root_config"]
12466         getSetAtoms = root_config.setconfig.getSetAtoms
12467         vardb = trees[myroot]["vartree"].dbapi
12468
12469         required_set_names = ("system", "world")
12470         required_sets = {}
12471         set_args = []
12472
12473         for s in required_set_names:
12474                 required_sets[s] = InternalPackageSet(
12475                         initial_atoms=getSetAtoms(s))
12476
12477         
12478         # When removing packages, use a temporary version of world
12479         # which excludes packages that are intended to be eligible for
12480         # removal.
12481         world_temp_set = required_sets["world"]
12482         system_set = required_sets["system"]
12483
12484         if not system_set or not world_temp_set:
12485
12486                 if not system_set:
12487                         writemsg_level("!!! You have no system list.\n",
12488                                 level=logging.ERROR, noiselevel=-1)
12489
12490                 if not world_temp_set:
12491                         writemsg_level("!!! You have no world file.\n",
12492                                         level=logging.WARNING, noiselevel=-1)
12493
12494                 writemsg_level("!!! Proceeding is likely to " + \
12495                         "break your installation.\n",
12496                         level=logging.WARNING, noiselevel=-1)
12497                 if "--pretend" not in myopts:
12498                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12499
12500         if action == "depclean":
12501                 emergelog(xterm_titles, " >>> depclean")
12502
12503         import textwrap
12504         args_set = InternalPackageSet()
12505         if myfiles:
12506                 for x in myfiles:
12507                         if not is_valid_package_atom(x):
12508                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12509                                         level=logging.ERROR, noiselevel=-1)
12510                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12511                                 return
12512                         try:
12513                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12514                         except portage.exception.AmbiguousPackageName, e:
12515                                 msg = "The short ebuild name \"" + x + \
12516                                         "\" is ambiguous.  Please specify " + \
12517                                         "one of the following " + \
12518                                         "fully-qualified ebuild names instead:"
12519                                 for line in textwrap.wrap(msg, 70):
12520                                         writemsg_level("!!! %s\n" % (line,),
12521                                                 level=logging.ERROR, noiselevel=-1)
12522                                 for i in e[0]:
12523                                         writemsg_level("    %s\n" % colorize("INFORM", i),
12524                                                 level=logging.ERROR, noiselevel=-1)
12525                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12526                                 return
12527                         args_set.add(atom)
12528                 matched_packages = False
12529                 for x in args_set:
12530                         if vardb.match(x):
12531                                 matched_packages = True
12532                                 break
12533                 if not matched_packages:
12534                         writemsg_level(">>> No packages selected for removal by %s\n" % \
12535                                 action)
12536                         return
12537
12538         writemsg_level("\nCalculating dependencies  ")
12539         resolver_params = create_depgraph_params(myopts, "remove")
12540         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12541         vardb = resolver.trees[myroot]["vartree"].dbapi
12542
12543         if action == "depclean":
12544
12545                 if args_set:
12546                         # Pull in everything that's installed but not matched
12547                         # by an argument atom since we don't want to clean any
12548                         # package if something depends on it.
12549
12550                         world_temp_set.clear()
12551                         for pkg in vardb:
12552                                 spinner.update()
12553
12554                                 try:
12555                                         if args_set.findAtomForPackage(pkg) is None:
12556                                                 world_temp_set.add("=" + pkg.cpv)
12557                                                 continue
12558                                 except portage.exception.InvalidDependString, e:
12559                                         show_invalid_depstring_notice(pkg,
12560                                                 pkg.metadata["PROVIDE"], str(e))
12561                                         del e
12562                                         world_temp_set.add("=" + pkg.cpv)
12563                                         continue
12564
12565         elif action == "prune":
12566
12567                 # Pull in everything that's installed since we don't
12568                 # to prune a package if something depends on it.
12569                 world_temp_set.clear()
12570                 world_temp_set.update(vardb.cp_all())
12571
12572                 if not args_set:
12573
12574                         # Try to prune everything that's slotted.
12575                         for cp in vardb.cp_all():
12576                                 if len(vardb.cp_list(cp)) > 1:
12577                                         args_set.add(cp)
12578
12579                 # Remove atoms from world that match installed packages
12580                 # that are also matched by argument atoms, but do not remove
12581                 # them if they match the highest installed version.
12582                 for pkg in vardb:
12583                         spinner.update()
12584                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12585                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
12586                                 raise AssertionError("package expected in matches: " + \
12587                                         "cp = %s, cpv = %s matches = %s" % \
12588                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12589
12590                         highest_version = pkgs_for_cp[-1]
12591                         if pkg == highest_version:
12592                                 # pkg is the highest version
12593                                 world_temp_set.add("=" + pkg.cpv)
12594                                 continue
12595
12596                         if len(pkgs_for_cp) <= 1:
12597                                 raise AssertionError("more packages expected: " + \
12598                                         "cp = %s, cpv = %s matches = %s" % \
12599                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12600
12601                         try:
12602                                 if args_set.findAtomForPackage(pkg) is None:
12603                                         world_temp_set.add("=" + pkg.cpv)
12604                                         continue
12605                         except portage.exception.InvalidDependString, e:
12606                                 show_invalid_depstring_notice(pkg,
12607                                         pkg.metadata["PROVIDE"], str(e))
12608                                 del e
12609                                 world_temp_set.add("=" + pkg.cpv)
12610                                 continue
12611
12612         set_args = {}
12613         for s, package_set in required_sets.iteritems():
12614                 set_atom = SETPREFIX + s
12615                 set_arg = SetArg(arg=set_atom, set=package_set,
12616                         root_config=resolver.roots[myroot])
12617                 set_args[s] = set_arg
12618                 for atom in set_arg.set:
12619                         resolver._dep_stack.append(
12620                                 Dependency(atom=atom, root=myroot, parent=set_arg))
12621                         resolver.digraph.add(set_arg, None)
12622
12623         success = resolver._complete_graph()
12624         writemsg_level("\b\b... done!\n")
12625
12626         resolver.display_problems()
12627
12628         if not success:
12629                 return 1
12630
12631         def unresolved_deps():
12632
12633                 unresolvable = set()
12634                 for dep in resolver._initially_unsatisfied_deps:
12635                         if isinstance(dep.parent, Package) and \
12636                                 (dep.priority > UnmergeDepPriority.SOFT):
12637                                 unresolvable.add((dep.atom, dep.parent.cpv))
12638
12639                 if not unresolvable:
12640                         return False
12641
12642                 if unresolvable and not allow_missing_deps:
12643                         prefix = bad(" * ")
12644                         msg = []
12645                         msg.append("Dependencies could not be completely resolved due to")
12646                         msg.append("the following required packages not being installed:")
12647                         msg.append("")
12648                         for atom, parent in unresolvable:
12649                                 msg.append("  %s pulled in by:" % (atom,))
12650                                 msg.append("    %s" % (parent,))
12651                                 msg.append("")
12652                         msg.append("Have you forgotten to run " + \
12653                                 good("`emerge --update --newuse --deep world`") + " prior to")
12654                         msg.append(("%s?  It may be necessary to manually " + \
12655                                 "uninstall packages that no longer") % action)
12656                         msg.append("exist in the portage tree since " + \
12657                                 "it may not be possible to satisfy their")
12658                         msg.append("dependencies.  Also, be aware of " + \
12659                                 "the --with-bdeps option that is documented")
12660                         msg.append("in " + good("`man emerge`") + ".")
12661                         if action == "prune":
12662                                 msg.append("")
12663                                 msg.append("If you would like to ignore " + \
12664                                         "dependencies then use %s." % good("--nodeps"))
12665                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12666                                 level=logging.ERROR, noiselevel=-1)
12667                         return True
12668                 return False
12669
12670         if unresolved_deps():
12671                 return 1
12672
12673         graph = resolver.digraph.copy()
12674         required_pkgs_total = 0
12675         for node in graph:
12676                 if isinstance(node, Package):
12677                         required_pkgs_total += 1
12678
12679         def show_parents(child_node):
12680                 parent_nodes = graph.parent_nodes(child_node)
12681                 if not parent_nodes:
12682                         # With --prune, the highest version can be pulled in without any
12683                         # real parent since all installed packages are pulled in.  In that
12684                         # case there's nothing to show here.
12685                         return
12686                 parent_strs = []
12687                 for node in parent_nodes:
12688                         parent_strs.append(str(getattr(node, "cpv", node)))
12689                 parent_strs.sort()
12690                 msg = []
12691                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
12692                 for parent_str in parent_strs:
12693                         msg.append("    %s\n" % (parent_str,))
12694                 msg.append("\n")
12695                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
12696
12697         def create_cleanlist():
12698                 pkgs_to_remove = []
12699
12700                 if action == "depclean":
12701                         if args_set:
12702
12703                                 for pkg in vardb:
12704                                         arg_atom = None
12705                                         try:
12706                                                 arg_atom = args_set.findAtomForPackage(pkg)
12707                                         except portage.exception.InvalidDependString:
12708                                                 # this error has already been displayed by now
12709                                                 continue
12710
12711                                         if arg_atom:
12712                                                 if pkg not in graph:
12713                                                         pkgs_to_remove.append(pkg)
12714                                                 elif "--verbose" in myopts:
12715                                                         show_parents(pkg)
12716
12717                         else:
12718                                 for pkg in vardb:
12719                                         if pkg not in graph:
12720                                                 pkgs_to_remove.append(pkg)
12721                                         elif "--verbose" in myopts:
12722                                                 show_parents(pkg)
12723
12724                 elif action == "prune":
12725                         # Prune really uses all installed instead of world. It's not
12726                         # a real reverse dependency so don't display it as such.
12727                         graph.remove(set_args["world"])
12728
12729                         for atom in args_set:
12730                                 for pkg in vardb.match_pkgs(atom):
12731                                         if pkg not in graph:
12732                                                 pkgs_to_remove.append(pkg)
12733                                         elif "--verbose" in myopts:
12734                                                 show_parents(pkg)
12735
12736                 if not pkgs_to_remove:
12737                         writemsg_level(
12738                                 ">>> No packages selected for removal by %s\n" % action)
12739                         if "--verbose" not in myopts:
12740                                 writemsg_level(
12741                                         ">>> To see reverse dependencies, use %s\n" % \
12742                                                 good("--verbose"))
12743                         if action == "prune":
12744                                 writemsg_level(
12745                                         ">>> To ignore dependencies, use %s\n" % \
12746                                                 good("--nodeps"))
12747
12748                 return pkgs_to_remove
12749
12750         cleanlist = create_cleanlist()
12751
12752         if len(cleanlist):
12753                 clean_set = set(cleanlist)
12754
12755                 # Check if any of these package are the sole providers of libraries
12756                 # with consumers that have not been selected for removal. If so, these
12757                 # packages and any dependencies need to be added to the graph.
12758                 real_vardb = trees[myroot]["vartree"].dbapi
12759                 linkmap = real_vardb.linkmap
12760                 liblist = linkmap.listLibraryObjects()
12761                 consumer_cache = {}
12762                 provider_cache = {}
12763                 soname_cache = {}
12764                 consumer_map = {}
12765
12766                 writemsg_level(">>> Checking for lib consumers...\n")
12767
12768                 for pkg in cleanlist:
12769                         pkg_dblink = real_vardb._dblink(pkg.cpv)
12770                         provided_libs = set()
12771
12772                         for lib in liblist:
12773                                 if pkg_dblink.isowner(lib, myroot):
12774                                         provided_libs.add(lib)
12775
12776                         if not provided_libs:
12777                                 continue
12778
12779                         consumers = {}
12780                         for lib in provided_libs:
12781                                 lib_consumers = consumer_cache.get(lib)
12782                                 if lib_consumers is None:
12783                                         lib_consumers = linkmap.findConsumers(lib)
12784                                         consumer_cache[lib] = lib_consumers
12785                                 if lib_consumers:
12786                                         consumers[lib] = lib_consumers
12787
12788                         if not consumers:
12789                                 continue
12790
12791                         for lib, lib_consumers in consumers.items():
12792                                 for consumer_file in list(lib_consumers):
12793                                         if pkg_dblink.isowner(consumer_file, myroot):
12794                                                 lib_consumers.remove(consumer_file)
12795                                 if not lib_consumers:
12796                                         del consumers[lib]
12797
12798                         if not consumers:
12799                                 continue
12800
12801                         for lib, lib_consumers in consumers.iteritems():
12802
12803                                 soname = soname_cache.get(lib)
12804                                 if soname is None:
12805                                         soname = linkmap.getSoname(lib)
12806                                         soname_cache[lib] = soname
12807
12808                                 consumer_providers = []
12809                                 for lib_consumer in lib_consumers:
12810                                         providers = provider_cache.get(lib)
12811                                         if providers is None:
12812                                                 providers = linkmap.findProviders(lib_consumer)
12813                                                 provider_cache[lib_consumer] = providers
12814                                         if soname not in providers:
12815                                                 # Why does this happen?
12816                                                 continue
12817                                         consumer_providers.append(
12818                                                 (lib_consumer, providers[soname]))
12819
12820                                 consumers[lib] = consumer_providers
12821
12822                         consumer_map[pkg] = consumers
12823
12824                 if consumer_map:
12825
12826                         search_files = set()
12827                         for consumers in consumer_map.itervalues():
12828                                 for lib, consumer_providers in consumers.iteritems():
12829                                         for lib_consumer, providers in consumer_providers:
12830                                                 search_files.add(lib_consumer)
12831                                                 search_files.update(providers)
12832
12833                         writemsg_level(">>> Assigning files to packages...\n")
12834                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
12835
12836                         for pkg, consumers in consumer_map.items():
12837                                 for lib, consumer_providers in consumers.items():
12838                                         lib_consumers = set()
12839
12840                                         for lib_consumer, providers in consumer_providers:
12841                                                 owner_set = file_owners.get(lib_consumer)
12842                                                 provider_dblinks = set()
12843                                                 provider_pkgs = set()
12844
12845                                                 if len(providers) > 1:
12846                                                         for provider in providers:
12847                                                                 provider_set = file_owners.get(provider)
12848                                                                 if provider_set is not None:
12849                                                                         provider_dblinks.update(provider_set)
12850
12851                                                 if len(provider_dblinks) > 1:
12852                                                         for provider_dblink in provider_dblinks:
12853                                                                 pkg_key = ("installed", myroot,
12854                                                                         provider_dblink.mycpv, "nomerge")
12855                                                                 if pkg_key not in clean_set:
12856                                                                         provider_pkgs.add(vardb.get(pkg_key))
12857
12858                                                 if provider_pkgs:
12859                                                         continue
12860
12861                                                 if owner_set is not None:
12862                                                         lib_consumers.update(owner_set)
12863
12864                                         for consumer_dblink in list(lib_consumers):
12865                                                 if ("installed", myroot, consumer_dblink.mycpv,
12866                                                         "nomerge") in clean_set:
12867                                                         lib_consumers.remove(consumer_dblink)
12868                                                         continue
12869
12870                                         if lib_consumers:
12871                                                 consumers[lib] = lib_consumers
12872                                         else:
12873                                                 del consumers[lib]
12874                                 if not consumers:
12875                                         del consumer_map[pkg]
12876
12877                 if consumer_map:
12878                         # TODO: Implement a package set for rebuilding consumer packages.
12879
12880                         msg = "In order to avoid breakage of link level " + \
12881                                 "dependencies, one or more packages will not be removed. " + \
12882                                 "This can be solved by rebuilding " + \
12883                                 "the packages that pulled them in."
12884
12885                         prefix = bad(" * ")
12886                         from textwrap import wrap
12887                         writemsg_level("".join(prefix + "%s\n" % line for \
12888                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
12889
12890                         msg = []
12891                         for pkg, consumers in consumer_map.iteritems():
12892                                 unique_consumers = set(chain(*consumers.values()))
12893                                 unique_consumers = sorted(consumer.mycpv \
12894                                         for consumer in unique_consumers)
12895                                 msg.append("")
12896                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
12897                                 for consumer in unique_consumers:
12898                                         msg.append("    %s" % (consumer,))
12899                         msg.append("")
12900                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
12901                                 level=logging.WARNING, noiselevel=-1)
12902
12903                         # Add lib providers to the graph as children of lib consumers,
12904                         # and also add any dependencies pulled in by the provider.
12905                         writemsg_level(">>> Adding lib providers to graph...\n")
12906
12907                         for pkg, consumers in consumer_map.iteritems():
12908                                 for consumer_dblink in set(chain(*consumers.values())):
12909                                         consumer_pkg = vardb.get(("installed", myroot,
12910                                                 consumer_dblink.mycpv, "nomerge"))
12911                                         if not resolver._add_pkg(pkg,
12912                                                 Dependency(parent=consumer_pkg,
12913                                                 priority=UnmergeDepPriority(runtime=True),
12914                                                 root=pkg.root)):
12915                                                 resolver.display_problems()
12916                                                 return 1
12917
12918                         writemsg_level("\nCalculating dependencies  ")
12919                         success = resolver._complete_graph()
12920                         writemsg_level("\b\b... done!\n")
12921                         resolver.display_problems()
12922                         if not success:
12923                                 return 1
12924                         if unresolved_deps():
12925                                 return 1
12926
12927                         graph = resolver.digraph.copy()
12928                         required_pkgs_total = 0
12929                         for node in graph:
12930                                 if isinstance(node, Package):
12931                                         required_pkgs_total += 1
12932                         cleanlist = create_cleanlist()
12933                         if not cleanlist:
12934                                 return 0
12935                         clean_set = set(cleanlist)
12936
12937                 # Use a topological sort to create an unmerge order such that
12938                 # each package is unmerged before it's dependencies. This is
12939                 # necessary to avoid breaking things that may need to run
12940                 # during pkg_prerm or pkg_postrm phases.
12941
12942                 # Create a new graph to account for dependencies between the
12943                 # packages being unmerged.
12944                 graph = digraph()
12945                 del cleanlist[:]
12946
12947                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12948                 runtime = UnmergeDepPriority(runtime=True)
12949                 runtime_post = UnmergeDepPriority(runtime_post=True)
12950                 buildtime = UnmergeDepPriority(buildtime=True)
12951                 priority_map = {
12952                         "RDEPEND": runtime,
12953                         "PDEPEND": runtime_post,
12954                         "DEPEND": buildtime,
12955                 }
12956
12957                 for node in clean_set:
12958                         graph.add(node, None)
12959                         mydeps = []
12960                         node_use = node.metadata["USE"].split()
12961                         for dep_type in dep_keys:
12962                                 depstr = node.metadata[dep_type]
12963                                 if not depstr:
12964                                         continue
12965                                 try:
12966                                         portage.dep._dep_check_strict = False
12967                                         success, atoms = portage.dep_check(depstr, None, settings,
12968                                                 myuse=node_use, trees=resolver._graph_trees,
12969                                                 myroot=myroot)
12970                                 finally:
12971                                         portage.dep._dep_check_strict = True
12972                                 if not success:
12973                                         # Ignore invalid deps of packages that will
12974                                         # be uninstalled anyway.
12975                                         continue
12976
12977                                 priority = priority_map[dep_type]
12978                                 for atom in atoms:
12979                                         if not isinstance(atom, portage.dep.Atom):
12980                                                 # Ignore invalid atoms returned from dep_check().
12981                                                 continue
12982                                         if atom.blocker:
12983                                                 continue
12984                                         matches = vardb.match_pkgs(atom)
12985                                         if not matches:
12986                                                 continue
12987                                         for child_node in matches:
12988                                                 if child_node in clean_set:
12989                                                         graph.add(child_node, node, priority=priority)
12990
12991                 ordered = True
12992                 if len(graph.order) == len(graph.root_nodes()):
12993                         # If there are no dependencies between packages
12994                         # let unmerge() group them by cat/pn.
12995                         ordered = False
12996                         cleanlist = [pkg.cpv for pkg in graph.order]
12997                 else:
12998                         # Order nodes from lowest to highest overall reference count for
12999                         # optimal root node selection.
13000                         node_refcounts = {}
13001                         for node in graph.order:
13002                                 node_refcounts[node] = len(graph.parent_nodes(node))
13003                         def cmp_reference_count(node1, node2):
13004                                 return node_refcounts[node1] - node_refcounts[node2]
13005                         graph.order.sort(cmp_reference_count)
13006         
13007                         ignore_priority_range = [None]
13008                         ignore_priority_range.extend(
13009                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13010                         while not graph.empty():
13011                                 for ignore_priority in ignore_priority_range:
13012                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
13013                                         if nodes:
13014                                                 break
13015                                 if not nodes:
13016                                         raise AssertionError("no root nodes")
13017                                 if ignore_priority is not None:
13018                                         # Some deps have been dropped due to circular dependencies,
13019                                         # so only pop one node in order do minimize the number that
13020                                         # are dropped.
13021                                         del nodes[1:]
13022                                 for node in nodes:
13023                                         graph.remove(node)
13024                                         cleanlist.append(node.cpv)
13025
13026                 unmerge(root_config, myopts, "unmerge", cleanlist,
13027                         ldpath_mtimes, ordered=ordered)
13028
13029         if action == "prune":
13030                 return
13031
13032         if not cleanlist and "--quiet" in myopts:
13033                 return
13034
13035         print "Packages installed:   "+str(len(vardb.cpv_all()))
13036         print "Packages in world:    " + \
13037                 str(len(root_config.sets["world"].getAtoms()))
13038         print "Packages in system:   " + \
13039                 str(len(root_config.sets["system"].getAtoms()))
13040         print "Required packages:    "+str(required_pkgs_total)
13041         if "--pretend" in myopts:
13042                 print "Number to remove:     "+str(len(cleanlist))
13043         else:
13044                 print "Number removed:       "+str(len(cleanlist))
13045
13046 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13047         skip_masked=False, skip_unsatisfied=False):
13048         """
13049         Construct a depgraph for the given resume list. This will raise
13050         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13051         @rtype: tuple
13052         @returns: (success, depgraph, dropped_tasks)
13053         """
13054         mergelist = mtimedb["resume"]["mergelist"]
13055         dropped_tasks = set()
13056         while True:
13057                 mydepgraph = depgraph(settings, trees,
13058                         myopts, myparams, spinner)
13059                 try:
13060                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13061                                 skip_masked=skip_masked)
13062                 except depgraph.UnsatisfiedResumeDep, e:
13063                         if not skip_unsatisfied:
13064                                 raise
13065
13066                         graph = mydepgraph.digraph
13067                         unsatisfied_parents = dict((dep.parent, dep.parent) \
13068                                 for dep in e.value)
13069                         traversed_nodes = set()
13070                         unsatisfied_stack = list(unsatisfied_parents)
13071                         while unsatisfied_stack:
13072                                 pkg = unsatisfied_stack.pop()
13073                                 if pkg in traversed_nodes:
13074                                         continue
13075                                 traversed_nodes.add(pkg)
13076
13077                                 # If this package was pulled in by a parent
13078                                 # package scheduled for merge, removing this
13079                                 # package may cause the the parent package's
13080                                 # dependency to become unsatisfied.
13081                                 for parent_node in graph.parent_nodes(pkg):
13082                                         if not isinstance(parent_node, Package) \
13083                                                 or parent_node.operation not in ("merge", "nomerge"):
13084                                                 continue
13085                                         unsatisfied = \
13086                                                 graph.child_nodes(parent_node,
13087                                                 ignore_priority=DepPriority.SOFT)
13088                                         if pkg in unsatisfied:
13089                                                 unsatisfied_parents[parent_node] = parent_node
13090                                                 unsatisfied_stack.append(parent_node)
13091
13092                         pruned_mergelist = [x for x in mergelist \
13093                                 if isinstance(x, list) and \
13094                                 tuple(x) not in unsatisfied_parents]
13095
13096                         # If the mergelist doesn't shrink then this loop is infinite.
13097                         if len(pruned_mergelist) == len(mergelist):
13098                                 # This happens if a package can't be dropped because
13099                                 # it's already installed, but it has unsatisfied PDEPEND.
13100                                 raise
13101                         mergelist[:] = pruned_mergelist
13102
13103                         # Exclude installed packages that have been removed from the graph due
13104                         # to failure to build/install runtime dependencies after the dependent
13105                         # package has already been installed.
13106                         dropped_tasks.update(pkg for pkg in \
13107                                 unsatisfied_parents if pkg.operation != "nomerge")
13108                         mydepgraph.break_refs(unsatisfied_parents)
13109
13110                         del e, graph, traversed_nodes, \
13111                                 unsatisfied_parents, unsatisfied_stack
13112                         continue
13113                 else:
13114                         break
13115         return (success, mydepgraph, dropped_tasks)
13116
13117 def action_build(settings, trees, mtimedb,
13118         myopts, myaction, myfiles, spinner):
13119
13120         # validate the state of the resume data
13121         # so that we can make assumptions later.
13122         for k in ("resume", "resume_backup"):
13123                 if k not in mtimedb:
13124                         continue
13125                 resume_data = mtimedb[k]
13126                 if not isinstance(resume_data, dict):
13127                         del mtimedb[k]
13128                         continue
13129                 mergelist = resume_data.get("mergelist")
13130                 if not isinstance(mergelist, list):
13131                         del mtimedb[k]
13132                         continue
13133                 resume_opts = resume_data.get("myopts")
13134                 if not isinstance(resume_opts, (dict, list)):
13135                         del mtimedb[k]
13136                         continue
13137                 favorites = resume_data.get("favorites")
13138                 if not isinstance(favorites, list):
13139                         del mtimedb[k]
13140                         continue
13141
13142         resume = False
13143         if "--resume" in myopts and \
13144                 ("resume" in mtimedb or
13145                 "resume_backup" in mtimedb):
13146                 resume = True
13147                 if "resume" not in mtimedb:
13148                         mtimedb["resume"] = mtimedb["resume_backup"]
13149                         del mtimedb["resume_backup"]
13150                         mtimedb.commit()
13151                 # "myopts" is a list for backward compatibility.
13152                 resume_opts = mtimedb["resume"].get("myopts", [])
13153                 if isinstance(resume_opts, list):
13154                         resume_opts = dict((k,True) for k in resume_opts)
13155                 for opt in ("--skipfirst", "--ask", "--tree"):
13156                         resume_opts.pop(opt, None)
13157                 myopts.update(resume_opts)
13158
13159                 if "--debug" in myopts:
13160                         writemsg_level("myopts %s\n" % (myopts,))
13161
13162                 # Adjust config according to options of the command being resumed.
13163                 for myroot in trees:
13164                         mysettings =  trees[myroot]["vartree"].settings
13165                         mysettings.unlock()
13166                         adjust_config(myopts, mysettings)
13167                         mysettings.lock()
13168                         del myroot, mysettings
13169
13170         ldpath_mtimes = mtimedb["ldpath"]
13171         favorites=[]
13172         merge_count = 0
13173         buildpkgonly = "--buildpkgonly" in myopts
13174         pretend = "--pretend" in myopts
13175         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13176         ask = "--ask" in myopts
13177         nodeps = "--nodeps" in myopts
13178         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13179         tree = "--tree" in myopts
13180         if nodeps and tree:
13181                 tree = False
13182                 del myopts["--tree"]
13183                 portage.writemsg(colorize("WARN", " * ") + \
13184                         "--tree is broken with --nodeps. Disabling...\n")
13185         debug = "--debug" in myopts
13186         verbose = "--verbose" in myopts
13187         quiet = "--quiet" in myopts
13188         if pretend or fetchonly:
13189                 # make the mtimedb readonly
13190                 mtimedb.filename = None
13191         if "--digest" in myopts:
13192                 msg = "The --digest option can prevent corruption from being" + \
13193                         " noticed. The `repoman manifest` command is the preferred" + \
13194                         " way to generate manifests and it is capable of doing an" + \
13195                         " entire repository or category at once."
13196                 prefix = bad(" * ")
13197                 writemsg(prefix + "\n")
13198                 from textwrap import wrap
13199                 for line in wrap(msg, 72):
13200                         writemsg("%s%s\n" % (prefix, line))
13201                 writemsg(prefix + "\n")
13202
13203         if "--quiet" not in myopts and \
13204                 ("--pretend" in myopts or "--ask" in myopts or \
13205                 "--tree" in myopts or "--verbose" in myopts):
13206                 action = ""
13207                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13208                         action = "fetched"
13209                 elif "--buildpkgonly" in myopts:
13210                         action = "built"
13211                 else:
13212                         action = "merged"
13213                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13214                         print
13215                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
13216                         print
13217                 else:
13218                         print
13219                         print darkgreen("These are the packages that would be %s, in order:") % action
13220                         print
13221
13222         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13223         if not show_spinner:
13224                 spinner.update = spinner.update_quiet
13225
13226         if resume:
13227                 favorites = mtimedb["resume"].get("favorites")
13228                 if not isinstance(favorites, list):
13229                         favorites = []
13230
13231                 if show_spinner:
13232                         print "Calculating dependencies  ",
13233                 myparams = create_depgraph_params(myopts, myaction)
13234
13235                 resume_data = mtimedb["resume"]
13236                 mergelist = resume_data["mergelist"]
13237                 if mergelist and "--skipfirst" in myopts:
13238                         for i, task in enumerate(mergelist):
13239                                 if isinstance(task, list) and \
13240                                         task and task[-1] == "merge":
13241                                         del mergelist[i]
13242                                         break
13243
13244                 skip_masked      = "--skipfirst" in myopts
13245                 skip_unsatisfied = "--skipfirst" in myopts
13246                 success = False
13247                 mydepgraph = None
13248                 try:
13249                         success, mydepgraph, dropped_tasks = resume_depgraph(
13250                                 settings, trees, mtimedb, myopts, myparams, spinner,
13251                                 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13252                 except (portage.exception.PackageNotFound,
13253                         depgraph.UnsatisfiedResumeDep), e:
13254                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13255                                 mydepgraph = e.depgraph
13256                         if show_spinner:
13257                                 print
13258                         from textwrap import wrap
13259                         from portage.output import EOutput
13260                         out = EOutput()
13261
13262                         resume_data = mtimedb["resume"]
13263                         mergelist = resume_data.get("mergelist")
13264                         if not isinstance(mergelist, list):
13265                                 mergelist = []
13266                         if mergelist and debug or (verbose and not quiet):
13267                                 out.eerror("Invalid resume list:")
13268                                 out.eerror("")
13269                                 indent = "  "
13270                                 for task in mergelist:
13271                                         if isinstance(task, list):
13272                                                 out.eerror(indent + str(tuple(task)))
13273                                 out.eerror("")
13274
13275                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13276                                 out.eerror("One or more packages are either masked or " + \
13277                                         "have missing dependencies:")
13278                                 out.eerror("")
13279                                 indent = "  "
13280                                 for dep in e.value:
13281                                         if dep.atom is None:
13282                                                 out.eerror(indent + "Masked package:")
13283                                                 out.eerror(2 * indent + str(dep.parent))
13284                                                 out.eerror("")
13285                                         else:
13286                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
13287                                                 out.eerror(2 * indent + str(dep.parent))
13288                                                 out.eerror("")
13289                                 msg = "The resume list contains packages " + \
13290                                         "that are either masked or have " + \
13291                                         "unsatisfied dependencies. " + \
13292                                         "Please restart/continue " + \
13293                                         "the operation manually, or use --skipfirst " + \
13294                                         "to skip the first package in the list and " + \
13295                                         "any other packages that may be " + \
13296                                         "masked or have missing dependencies."
13297                                 for line in wrap(msg, 72):
13298                                         out.eerror(line)
13299                         elif isinstance(e, portage.exception.PackageNotFound):
13300                                 out.eerror("An expected package is " + \
13301                                         "not available: %s" % str(e))
13302                                 out.eerror("")
13303                                 msg = "The resume list contains one or more " + \
13304                                         "packages that are no longer " + \
13305                                         "available. Please restart/continue " + \
13306                                         "the operation manually."
13307                                 for line in wrap(msg, 72):
13308                                         out.eerror(line)
13309                 else:
13310                         if show_spinner:
13311                                 print "\b\b... done!"
13312
13313                 if success:
13314                         if dropped_tasks:
13315                                 portage.writemsg("!!! One or more packages have been " + \
13316                                         "dropped due to\n" + \
13317                                         "!!! masking or unsatisfied dependencies:\n\n",
13318                                         noiselevel=-1)
13319                                 for task in dropped_tasks:
13320                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
13321                                 portage.writemsg("\n", noiselevel=-1)
13322                         del dropped_tasks
13323                 else:
13324                         if mydepgraph is not None:
13325                                 mydepgraph.display_problems()
13326                         if not (ask or pretend):
13327                                 # delete the current list and also the backup
13328                                 # since it's probably stale too.
13329                                 for k in ("resume", "resume_backup"):
13330                                         mtimedb.pop(k, None)
13331                                 mtimedb.commit()
13332
13333                         return 1
13334         else:
13335                 if ("--resume" in myopts):
13336                         print darkgreen("emerge: It seems we have nothing to resume...")
13337                         return os.EX_OK
13338
13339                 myparams = create_depgraph_params(myopts, myaction)
13340                 if "--quiet" not in myopts and "--nodeps" not in myopts:
13341                         print "Calculating dependencies  ",
13342                         sys.stdout.flush()
13343                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13344                 try:
13345                         retval, favorites = mydepgraph.select_files(myfiles)
13346                 except portage.exception.PackageNotFound, e:
13347                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13348                         return 1
13349                 except portage.exception.PackageSetNotFound, e:
13350                         root_config = trees[settings["ROOT"]]["root_config"]
13351                         display_missing_pkg_set(root_config, e.value)
13352                         return 1
13353                 if show_spinner:
13354                         print "\b\b... done!"
13355                 if not retval:
13356                         mydepgraph.display_problems()
13357                         return 1
13358
13359         if "--pretend" not in myopts and \
13360                 ("--ask" in myopts or "--tree" in myopts or \
13361                 "--verbose" in myopts) and \
13362                 not ("--quiet" in myopts and "--ask" not in myopts):
13363                 if "--resume" in myopts:
13364                         mymergelist = mydepgraph.altlist()
13365                         if len(mymergelist) == 0:
13366                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13367                                 return os.EX_OK
13368                         favorites = mtimedb["resume"]["favorites"]
13369                         retval = mydepgraph.display(
13370                                 mydepgraph.altlist(reversed=tree),
13371                                 favorites=favorites)
13372                         mydepgraph.display_problems()
13373                         if retval != os.EX_OK:
13374                                 return retval
13375                         prompt="Would you like to resume merging these packages?"
13376                 else:
13377                         retval = mydepgraph.display(
13378                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13379                                 favorites=favorites)
13380                         mydepgraph.display_problems()
13381                         if retval != os.EX_OK:
13382                                 return retval
13383                         mergecount=0
13384                         for x in mydepgraph.altlist():
13385                                 if isinstance(x, Package) and x.operation == "merge":
13386                                         mergecount += 1
13387
13388                         if mergecount==0:
13389                                 sets = trees[settings["ROOT"]]["root_config"].sets
13390                                 world_candidates = None
13391                                 if "--noreplace" in myopts and \
13392                                         not oneshot and favorites:
13393                                         # Sets that are not world candidates are filtered
13394                                         # out here since the favorites list needs to be
13395                                         # complete for depgraph.loadResumeCommand() to
13396                                         # operate correctly.
13397                                         world_candidates = [x for x in favorites \
13398                                                 if not (x.startswith(SETPREFIX) and \
13399                                                 not sets[x[1:]].world_candidate)]
13400                                 if "--noreplace" in myopts and \
13401                                         not oneshot and world_candidates:
13402                                         print
13403                                         for x in world_candidates:
13404                                                 print " %s %s" % (good("*"), x)
13405                                         prompt="Would you like to add these packages to your world favorites?"
13406                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13407                                         prompt="Nothing to merge; would you like to auto-clean packages?"
13408                                 else:
13409                                         print
13410                                         print "Nothing to merge; quitting."
13411                                         print
13412                                         return os.EX_OK
13413                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13414                                 prompt="Would you like to fetch the source files for these packages?"
13415                         else:
13416                                 prompt="Would you like to merge these packages?"
13417                 print
13418                 if "--ask" in myopts and userquery(prompt) == "No":
13419                         print
13420                         print "Quitting."
13421                         print
13422                         return os.EX_OK
13423                 # Don't ask again (e.g. when auto-cleaning packages after merge)
13424                 myopts.pop("--ask", None)
13425
13426         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13427                 if ("--resume" in myopts):
13428                         mymergelist = mydepgraph.altlist()
13429                         if len(mymergelist) == 0:
13430                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13431                                 return os.EX_OK
13432                         favorites = mtimedb["resume"]["favorites"]
13433                         retval = mydepgraph.display(
13434                                 mydepgraph.altlist(reversed=tree),
13435                                 favorites=favorites)
13436                         mydepgraph.display_problems()
13437                         if retval != os.EX_OK:
13438                                 return retval
13439                 else:
13440                         retval = mydepgraph.display(
13441                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13442                                 favorites=favorites)
13443                         mydepgraph.display_problems()
13444                         if retval != os.EX_OK:
13445                                 return retval
13446                         if "--buildpkgonly" in myopts:
13447                                 graph_copy = mydepgraph.digraph.clone()
13448                                 for node in list(graph_copy.order):
13449                                         if not isinstance(node, Package):
13450                                                 graph_copy.remove(node)
13451                                 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13452                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
13453                                         print "!!! You have to merge the dependencies before you can build this package.\n"
13454                                         return 1
13455         else:
13456                 if "--buildpkgonly" in myopts:
13457                         graph_copy = mydepgraph.digraph.clone()
13458                         for node in list(graph_copy.order):
13459                                 if not isinstance(node, Package):
13460                                         graph_copy.remove(node)
13461                         if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13462                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13463                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
13464                                 return 1
13465
13466                 if ("--resume" in myopts):
13467                         favorites=mtimedb["resume"]["favorites"]
13468                         mymergelist = mydepgraph.altlist()
13469                         mydepgraph.break_refs(mymergelist)
13470                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13471                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13472                         del mydepgraph, mymergelist
13473                         clear_caches(trees)
13474
13475                         retval = mergetask.merge()
13476                         merge_count = mergetask.curval
13477                 else:
13478                         if "resume" in mtimedb and \
13479                         "mergelist" in mtimedb["resume"] and \
13480                         len(mtimedb["resume"]["mergelist"]) > 1:
13481                                 mtimedb["resume_backup"] = mtimedb["resume"]
13482                                 del mtimedb["resume"]
13483                                 mtimedb.commit()
13484                         mtimedb["resume"]={}
13485                         # Stored as a dict starting with portage-2.2_rc7, and supported
13486                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13487                         # a list type for options.
13488                         mtimedb["resume"]["myopts"] = myopts.copy()
13489
13490                         # Convert Atom instances to plain str since the mtimedb loader
13491                         # sets unpickler.find_global = None which causes unpickler.load()
13492                         # to raise the following exception:
13493                         #
13494                         # cPickle.UnpicklingError: Global and instance pickles are not supported.
13495                         #
13496                         # TODO: Maybe stop setting find_global = None, or find some other
13497                         # way to avoid accidental triggering of the above UnpicklingError.
13498                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13499
13500                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13501                                 for pkgline in mydepgraph.altlist():
13502                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13503                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13504                                                 tmpsettings = portage.config(clone=settings)
13505                                                 edebug = 0
13506                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
13507                                                         edebug = 1
13508                                                 retval = portage.doebuild(
13509                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
13510                                                         ("--pretend" in myopts),
13511                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13512                                                         tree="porttree")
13513
13514                         pkglist = mydepgraph.altlist()
13515                         mydepgraph.saveNomergeFavorites()
13516                         mydepgraph.break_refs(pkglist)
13517                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13518                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13519                         del mydepgraph, pkglist
13520                         clear_caches(trees)
13521
13522                         retval = mergetask.merge()
13523                         merge_count = mergetask.curval
13524
13525                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13526                         if "yes" == settings.get("AUTOCLEAN"):
13527                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13528                                 unmerge(trees[settings["ROOT"]]["root_config"],
13529                                         myopts, "clean", [],
13530                                         ldpath_mtimes, autoclean=1)
13531                         else:
13532                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13533                                         + " AUTOCLEAN is disabled.  This can cause serious"
13534                                         + " problems due to overlapping packages.\n")
13535                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
13536
13537                 return retval
13538
13539 def multiple_actions(action1, action2):
13540         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13541         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
13542         sys.exit(1)
13543
13544 def insert_optional_args(args):
13545         """
13546         Parse optional arguments and insert a value if one has
13547         not been provided. This is done before feeding the args
13548         to the optparse parser since that parser does not support
13549         this feature natively.
13550         """
13551
13552         new_args = []
13553         jobs_opts = ("-j", "--jobs")
13554         arg_stack = args[:]
13555         arg_stack.reverse()
13556         while arg_stack:
13557                 arg = arg_stack.pop()
13558
13559                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13560                 if not (short_job_opt or arg in jobs_opts):
13561                         new_args.append(arg)
13562                         continue
13563
13564                 # Insert an empty placeholder in order to
13565                 # satisfy the requirements of optparse.
13566
13567                 new_args.append("--jobs")
13568                 job_count = None
13569                 saved_opts = None
13570                 if short_job_opt and len(arg) > 2:
13571                         if arg[:2] == "-j":
13572                                 try:
13573                                         job_count = int(arg[2:])
13574                                 except ValueError:
13575                                         saved_opts = arg[2:]
13576                         else:
13577                                 job_count = "True"
13578                                 saved_opts = arg[1:].replace("j", "")
13579
13580                 if job_count is None and arg_stack:
13581                         try:
13582                                 job_count = int(arg_stack[-1])
13583                         except ValueError:
13584                                 pass
13585                         else:
13586                                 # Discard the job count from the stack
13587                                 # since we're consuming it here.
13588                                 arg_stack.pop()
13589
13590                 if job_count is None:
13591                         # unlimited number of jobs
13592                         new_args.append("True")
13593                 else:
13594                         new_args.append(str(job_count))
13595
13596                 if saved_opts is not None:
13597                         new_args.append("-" + saved_opts)
13598
13599         return new_args
13600
13601 def parse_opts(tmpcmdline, silent=False):
13602         myaction=None
13603         myopts = {}
13604         myfiles=[]
13605
13606         global actions, options, shortmapping
13607
13608         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
13609         argument_options = {
13610                 "--config-root": {
13611                         "help":"specify the location for portage configuration files",
13612                         "action":"store"
13613                 },
13614                 "--color": {
13615                         "help":"enable or disable color output",
13616                         "type":"choice",
13617                         "choices":("y", "n")
13618                 },
13619
13620                 "--jobs": {
13621
13622                         "help"   : "Specifies the number of packages to build " + \
13623                                 "simultaneously.",
13624
13625                         "action" : "store"
13626                 },
13627
13628                 "--load-average": {
13629
13630                         "help"   :"Specifies that no new builds should be started " + \
13631                                 "if there are other builds running and the load average " + \
13632                                 "is at least LOAD (a floating-point number).",
13633
13634                         "action" : "store"
13635                 },
13636
13637                 "--with-bdeps": {
13638                         "help":"include unnecessary build time dependencies",
13639                         "type":"choice",
13640                         "choices":("y", "n")
13641                 },
13642                 "--reinstall": {
13643                         "help":"specify conditions to trigger package reinstallation",
13644                         "type":"choice",
13645                         "choices":["changed-use"]
13646                 }
13647         }
13648
13649         from optparse import OptionParser
13650         parser = OptionParser()
13651         if parser.has_option("--help"):
13652                 parser.remove_option("--help")
13653
13654         for action_opt in actions:
13655                 parser.add_option("--" + action_opt, action="store_true",
13656                         dest=action_opt.replace("-", "_"), default=False)
13657         for myopt in options:
13658                 parser.add_option(myopt, action="store_true",
13659                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
13660         for shortopt, longopt in shortmapping.iteritems():
13661                 parser.add_option("-" + shortopt, action="store_true",
13662                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
13663         for myalias, myopt in longopt_aliases.iteritems():
13664                 parser.add_option(myalias, action="store_true",
13665                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
13666
13667         for myopt, kwargs in argument_options.iteritems():
13668                 parser.add_option(myopt,
13669                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
13670
13671         tmpcmdline = insert_optional_args(tmpcmdline)
13672
13673         myoptions, myargs = parser.parse_args(args=tmpcmdline)
13674
13675         if myoptions.jobs:
13676                 jobs = None
13677                 if myoptions.jobs == "True":
13678                         jobs = True
13679                 else:
13680                         try:
13681                                 jobs = int(myoptions.jobs)
13682                         except ValueError:
13683                                 jobs = -1
13684
13685                 if jobs is not True and \
13686                         jobs < 1:
13687                         jobs = None
13688                         if not silent:
13689                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
13690                                         (myoptions.jobs,), noiselevel=-1)
13691
13692                 myoptions.jobs = jobs
13693
13694         if myoptions.load_average:
13695                 try:
13696                         load_average = float(myoptions.load_average)
13697                 except ValueError:
13698                         load_average = 0.0
13699
13700                 if load_average <= 0.0:
13701                         load_average = None
13702                         if not silent:
13703                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
13704                                         (myoptions.load_average,), noiselevel=-1)
13705
13706                 myoptions.load_average = load_average
13707
13708         for myopt in options:
13709                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
13710                 if v:
13711                         myopts[myopt] = True
13712
13713         for myopt in argument_options:
13714                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
13715                 if v is not None:
13716                         myopts[myopt] = v
13717
13718         for action_opt in actions:
13719                 v = getattr(myoptions, action_opt.replace("-", "_"))
13720                 if v:
13721                         if myaction:
13722                                 multiple_actions(myaction, action_opt)
13723                                 sys.exit(1)
13724                         myaction = action_opt
13725
13726         myfiles += myargs
13727
13728         return myaction, myopts, myfiles
13729
13730 def validate_ebuild_environment(trees):
13731         for myroot in trees:
13732                 settings = trees[myroot]["vartree"].settings
13733                 settings.validate()
13734
13735 def clear_caches(trees):
13736         for d in trees.itervalues():
13737                 d["porttree"].dbapi.melt()
13738                 d["porttree"].dbapi._aux_cache.clear()
13739                 d["bintree"].dbapi._aux_cache.clear()
13740                 d["bintree"].dbapi._clear_cache()
13741                 d["vartree"].dbapi.linkmap._clear_cache()
13742         portage.dircache.clear()
13743         gc.collect()
13744
13745 def load_emerge_config(trees=None):
13746         kwargs = {}
13747         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
13748                 v = os.environ.get(envvar, None)
13749                 if v and v.strip():
13750                         kwargs[k] = v
13751         trees = portage.create_trees(trees=trees, **kwargs)
13752
13753         for root, root_trees in trees.iteritems():
13754                 settings = root_trees["vartree"].settings
13755                 setconfig = load_default_config(settings, root_trees)
13756                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
13757
13758         settings = trees["/"]["vartree"].settings
13759
13760         for myroot in trees:
13761                 if myroot != "/":
13762                         settings = trees[myroot]["vartree"].settings
13763                         break
13764
13765         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
13766         mtimedb = portage.MtimeDB(mtimedbfile)
13767         
13768         return settings, trees, mtimedb
13769
13770 def adjust_config(myopts, settings):
13771         """Make emerge specific adjustments to the config."""
13772
13773         # To enhance usability, make some vars case insensitive by forcing them to
13774         # lower case.
13775         for myvar in ("AUTOCLEAN", "NOCOLOR"):
13776                 if myvar in settings:
13777                         settings[myvar] = settings[myvar].lower()
13778                         settings.backup_changes(myvar)
13779         del myvar
13780
13781         # Kill noauto as it will break merges otherwise.
13782         if "noauto" in settings.features:
13783                 while "noauto" in settings.features:
13784                         settings.features.remove("noauto")
13785                 settings["FEATURES"] = " ".join(settings.features)
13786                 settings.backup_changes("FEATURES")
13787
13788         CLEAN_DELAY = 5
13789         try:
13790                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
13791         except ValueError, e:
13792                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13793                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
13794                         settings["CLEAN_DELAY"], noiselevel=-1)
13795         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
13796         settings.backup_changes("CLEAN_DELAY")
13797
13798         EMERGE_WARNING_DELAY = 10
13799         try:
13800                 EMERGE_WARNING_DELAY = int(settings.get(
13801                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
13802         except ValueError, e:
13803                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13804                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
13805                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
13806         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
13807         settings.backup_changes("EMERGE_WARNING_DELAY")
13808
13809         if "--quiet" in myopts:
13810                 settings["PORTAGE_QUIET"]="1"
13811                 settings.backup_changes("PORTAGE_QUIET")
13812
13813         if "--verbose" in myopts:
13814                 settings["PORTAGE_VERBOSE"] = "1"
13815                 settings.backup_changes("PORTAGE_VERBOSE")
13816
13817         # Set so that configs will be merged regardless of remembered status
13818         if ("--noconfmem" in myopts):
13819                 settings["NOCONFMEM"]="1"
13820                 settings.backup_changes("NOCONFMEM")
13821
13822         # Set various debug markers... They should be merged somehow.
13823         PORTAGE_DEBUG = 0
13824         try:
13825                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
13826                 if PORTAGE_DEBUG not in (0, 1):
13827                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
13828                                 PORTAGE_DEBUG, noiselevel=-1)
13829                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
13830                                 noiselevel=-1)
13831                         PORTAGE_DEBUG = 0
13832         except ValueError, e:
13833                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13834                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
13835                         settings["PORTAGE_DEBUG"], noiselevel=-1)
13836                 del e
13837         if "--debug" in myopts:
13838                 PORTAGE_DEBUG = 1
13839         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
13840         settings.backup_changes("PORTAGE_DEBUG")
13841
13842         if settings.get("NOCOLOR") not in ("yes","true"):
13843                 portage.output.havecolor = 1
13844
13845         """The explicit --color < y | n > option overrides the NOCOLOR environment
13846         variable and stdout auto-detection."""
13847         if "--color" in myopts:
13848                 if "y" == myopts["--color"]:
13849                         portage.output.havecolor = 1
13850                         settings["NOCOLOR"] = "false"
13851                 else:
13852                         portage.output.havecolor = 0
13853                         settings["NOCOLOR"] = "true"
13854                 settings.backup_changes("NOCOLOR")
13855         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
13856                 portage.output.havecolor = 0
13857                 settings["NOCOLOR"] = "true"
13858                 settings.backup_changes("NOCOLOR")
13859
13860 def apply_priorities(settings):
13861         ionice(settings)
13862         nice(settings)
13863
13864 def nice(settings):
13865         try:
13866                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
13867         except (OSError, ValueError), e:
13868                 out = portage.output.EOutput()
13869                 out.eerror("Failed to change nice value to '%s'" % \
13870                         settings["PORTAGE_NICENESS"])
13871                 out.eerror("%s\n" % str(e))
13872
13873 def ionice(settings):
13874
13875         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
13876         if ionice_cmd:
13877                 ionice_cmd = shlex.split(ionice_cmd)
13878         if not ionice_cmd:
13879                 return
13880
13881         from portage.util import varexpand
13882         variables = {"PID" : str(os.getpid())}
13883         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
13884
13885         try:
13886                 rval = portage.process.spawn(cmd, env=os.environ)
13887         except portage.exception.CommandNotFound:
13888                 # The OS kernel probably doesn't support ionice,
13889                 # so return silently.
13890                 return
13891
13892         if rval != os.EX_OK:
13893                 out = portage.output.EOutput()
13894                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
13895                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
13896
13897 def display_missing_pkg_set(root_config, set_name):
13898
13899         msg = []
13900         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
13901                 "The following sets exist:") % \
13902                 colorize("INFORM", set_name))
13903         msg.append("")
13904
13905         for s in sorted(root_config.sets):
13906                 msg.append("    %s" % s)
13907         msg.append("")
13908
13909         writemsg_level("".join("%s\n" % l for l in msg),
13910                 level=logging.ERROR, noiselevel=-1)
13911
13912 def expand_set_arguments(myfiles, myaction, root_config):
13913         retval = os.EX_OK
13914         setconfig = root_config.setconfig
13915
13916         sets = setconfig.getSets()
13917
13918         # In order to know exactly which atoms/sets should be added to the
13919         # world file, the depgraph performs set expansion later. It will get
13920         # confused about where the atoms came from if it's not allowed to
13921         # expand them itself.
13922         do_not_expand = (None, )
13923         newargs = []
13924         for a in myfiles:
13925                 if a in ("system", "world"):
13926                         newargs.append(SETPREFIX+a)
13927                 else:
13928                         newargs.append(a)
13929         myfiles = newargs
13930         del newargs
13931         newargs = []
13932
13933         # separators for set arguments
13934         ARG_START = "{"
13935         ARG_END = "}"
13936
13937         # WARNING: all operators must be of equal length
13938         IS_OPERATOR = "/@"
13939         DIFF_OPERATOR = "-@"
13940         UNION_OPERATOR = "+@"
13941         
13942         for i in range(0, len(myfiles)):
13943                 if myfiles[i].startswith(SETPREFIX):
13944                         start = 0
13945                         end = 0
13946                         x = myfiles[i][len(SETPREFIX):]
13947                         newset = ""
13948                         while x:
13949                                 start = x.find(ARG_START)
13950                                 end = x.find(ARG_END)
13951                                 if start > 0 and start < end:
13952                                         namepart = x[:start]
13953                                         argpart = x[start+1:end]
13954                                 
13955                                         # TODO: implement proper quoting
13956                                         args = argpart.split(",")
13957                                         options = {}
13958                                         for a in args:
13959                                                 if "=" in a:
13960                                                         k, v  = a.split("=", 1)
13961                                                         options[k] = v
13962                                                 else:
13963                                                         options[a] = "True"
13964                                         setconfig.update(namepart, options)
13965                                         newset += (x[:start-len(namepart)]+namepart)
13966                                         x = x[end+len(ARG_END):]
13967                                 else:
13968                                         newset += x
13969                                         x = ""
13970                         myfiles[i] = SETPREFIX+newset
13971                                 
13972         sets = setconfig.getSets()
13973
13974         # display errors that occured while loading the SetConfig instance
13975         for e in setconfig.errors:
13976                 print colorize("BAD", "Error during set creation: %s" % e)
13977         
13978         # emerge relies on the existance of sets with names "world" and "system"
13979         required_sets = ("world", "system")
13980
13981         for s in required_sets:
13982                 if s not in sets:
13983                         msg = ["emerge: incomplete set configuration, " + \
13984                                 "no \"%s\" set defined" % s]
13985                         msg.append("        sets defined: %s" % ", ".join(sets))
13986                         for line in msg:
13987                                 sys.stderr.write(line + "\n")
13988                         retval = 1
13989         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
13990
13991         for a in myfiles:
13992                 if a.startswith(SETPREFIX):
13993                         # support simple set operations (intersection, difference and union)
13994                         # on the commandline. Expressions are evaluated strictly left-to-right
13995                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
13996                                 expression = a[len(SETPREFIX):]
13997                                 expr_sets = []
13998                                 expr_ops = []
13999                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14000                                         is_pos = expression.rfind(IS_OPERATOR)
14001                                         diff_pos = expression.rfind(DIFF_OPERATOR)
14002                                         union_pos = expression.rfind(UNION_OPERATOR)
14003                                         op_pos = max(is_pos, diff_pos, union_pos)
14004                                         s1 = expression[:op_pos]
14005                                         s2 = expression[op_pos+len(IS_OPERATOR):]
14006                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14007                                         if not s2 in sets:
14008                                                 display_missing_pkg_set(root_config, s2)
14009                                                 return (None, 1)
14010                                         expr_sets.insert(0, s2)
14011                                         expr_ops.insert(0, op)
14012                                         expression = s1
14013                                 if not expression in sets:
14014                                         display_missing_pkg_set(root_config, expression)
14015                                         return (None, 1)
14016                                 expr_sets.insert(0, expression)
14017                                 result = set(setconfig.getSetAtoms(expression))
14018                                 for i in range(0, len(expr_ops)):
14019                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
14020                                         if expr_ops[i] == IS_OPERATOR:
14021                                                 result.intersection_update(s2)
14022                                         elif expr_ops[i] == DIFF_OPERATOR:
14023                                                 result.difference_update(s2)
14024                                         elif expr_ops[i] == UNION_OPERATOR:
14025                                                 result.update(s2)
14026                                         else:
14027                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14028                                 newargs.extend(result)
14029                         else:                   
14030                                 s = a[len(SETPREFIX):]
14031                                 if s not in sets:
14032                                         display_missing_pkg_set(root_config, s)
14033                                         return (None, 1)
14034                                 setconfig.active.append(s)
14035                                 try:
14036                                         set_atoms = setconfig.getSetAtoms(s)
14037                                 except portage.exception.PackageSetNotFound, e:
14038                                         writemsg_level(("emerge: the given set '%s' " + \
14039                                                 "contains a non-existent set named '%s'.\n") % \
14040                                                 (s, e), level=logging.ERROR, noiselevel=-1)
14041                                         return (None, 1)
14042                                 if myaction in unmerge_actions and \
14043                                                 not sets[s].supportsOperation("unmerge"):
14044                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
14045                                                 "not support unmerge operations\n")
14046                                         retval = 1
14047                                 elif not set_atoms:
14048                                         print "emerge: '%s' is an empty set" % s
14049                                 elif myaction not in do_not_expand:
14050                                         newargs.extend(set_atoms)
14051                                 else:
14052                                         newargs.append(SETPREFIX+s)
14053                                 for e in sets[s].errors:
14054                                         print e
14055                 else:
14056                         newargs.append(a)
14057         return (newargs, retval)
14058
14059 def repo_name_check(trees):
14060         missing_repo_names = set()
14061         for root, root_trees in trees.iteritems():
14062                 if "porttree" in root_trees:
14063                         portdb = root_trees["porttree"].dbapi
14064                         missing_repo_names.update(portdb.porttrees)
14065                         repos = portdb.getRepositories()
14066                         for r in repos:
14067                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
14068
14069         if missing_repo_names:
14070                 msg = []
14071                 msg.append("WARNING: One or more repositories " + \
14072                         "have missing repo_name entries:")
14073                 msg.append("")
14074                 for p in missing_repo_names:
14075                         msg.append("\t%s/profiles/repo_name" % (p,))
14076                 msg.append("")
14077                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14078                         "should be a plain text file containing a unique " + \
14079                         "name for the repository on the first line.", 70))
14080                 writemsg_level("".join("%s\n" % l for l in msg),
14081                         level=logging.WARNING, noiselevel=-1)
14082
14083         return bool(missing_repo_names)
14084
14085 def config_protect_check(trees):
14086         for root, root_trees in trees.iteritems():
14087                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14088                         msg = "!!! CONFIG_PROTECT is empty"
14089                         if root != "/":
14090                                 msg += " for '%s'" % root
14091                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
14092
14093 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
14094
14095         if "--quiet" in myopts:
14096                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14097                 print "!!! one of the following fully-qualified ebuild names instead:\n"
14098                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14099                         print "    " + colorize("INFORM", cp)
14100                 return
14101
14102         s = search(root_config, spinner, "--searchdesc" in myopts,
14103                 "--quiet" not in myopts, "--usepkg" in myopts,
14104                 "--usepkgonly" in myopts)
14105         null_cp = portage.dep_getkey(insert_category_into_atom(
14106                 arg, "null"))
14107         cat, atom_pn = portage.catsplit(null_cp)
14108         s.searchkey = atom_pn
14109         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14110                 s.addCP(cp)
14111         s.output()
14112         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14113         print "!!! one of the above fully-qualified ebuild names instead.\n"
14114
14115 def profile_check(trees, myaction, myopts):
14116         if myaction in ("info", "sync"):
14117                 return os.EX_OK
14118         elif "--version" in myopts or "--help" in myopts:
14119                 return os.EX_OK
14120         for root, root_trees in trees.iteritems():
14121                 if root_trees["root_config"].settings.profiles:
14122                         continue
14123                 # generate some profile related warning messages
14124                 validate_ebuild_environment(trees)
14125                 msg = "If you have just changed your profile configuration, you " + \
14126                         "should revert back to the previous configuration. Due to " + \
14127                         "your current profile being invalid, allowed actions are " + \
14128                         "limited to --help, --info, --sync, and --version."
14129                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14130                         level=logging.ERROR, noiselevel=-1)
14131                 return 1
14132         return os.EX_OK
14133
14134 def emerge_main():
14135         global portage  # NFC why this is necessary now - genone
14136         portage._disable_legacy_globals()
14137         # Disable color until we're sure that it should be enabled (after
14138         # EMERGE_DEFAULT_OPTS has been parsed).
14139         portage.output.havecolor = 0
14140         # This first pass is just for options that need to be known as early as
14141         # possible, such as --config-root.  They will be parsed again later,
14142         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14143         # the value of --config-root).
14144         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14145         if "--debug" in myopts:
14146                 os.environ["PORTAGE_DEBUG"] = "1"
14147         if "--config-root" in myopts:
14148                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14149
14150         # Portage needs to ensure a sane umask for the files it creates.
14151         os.umask(022)
14152         settings, trees, mtimedb = load_emerge_config()
14153         portdb = trees[settings["ROOT"]]["porttree"].dbapi
14154         rval = profile_check(trees, myaction, myopts)
14155         if rval != os.EX_OK:
14156                 return rval
14157
14158         if portage._global_updates(trees, mtimedb["updates"]):
14159                 mtimedb.commit()
14160                 # Reload the whole config from scratch.
14161                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14162                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14163
14164         xterm_titles = "notitles" not in settings.features
14165
14166         tmpcmdline = []
14167         if "--ignore-default-opts" not in myopts:
14168                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14169         tmpcmdline.extend(sys.argv[1:])
14170         myaction, myopts, myfiles = parse_opts(tmpcmdline)
14171
14172         if "--digest" in myopts:
14173                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14174                 # Reload the whole config from scratch so that the portdbapi internal
14175                 # config is updated with new FEATURES.
14176                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14177                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14178
14179         for myroot in trees:
14180                 mysettings =  trees[myroot]["vartree"].settings
14181                 mysettings.unlock()
14182                 adjust_config(myopts, mysettings)
14183                 mysettings["PORTAGE_COUNTER_HASH"] = \
14184                         trees[myroot]["vartree"].dbapi._counter_hash()
14185                 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14186                 mysettings.lock()
14187                 del myroot, mysettings
14188
14189         apply_priorities(settings)
14190
14191         spinner = stdout_spinner()
14192         if "candy" in settings.features:
14193                 spinner.update = spinner.update_scroll
14194
14195         if "--quiet" not in myopts:
14196                 portage.deprecated_profile_check()
14197                 repo_name_check(trees)
14198                 config_protect_check(trees)
14199
14200         eclasses_overridden = {}
14201         for mytrees in trees.itervalues():
14202                 mydb = mytrees["porttree"].dbapi
14203                 # Freeze the portdbapi for performance (memoize all xmatch results).
14204                 mydb.freeze()
14205                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14206         del mytrees, mydb
14207
14208         if eclasses_overridden and \
14209                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14210                 prefix = bad(" * ")
14211                 if len(eclasses_overridden) == 1:
14212                         writemsg(prefix + "Overlay eclass overrides " + \
14213                                 "eclass from PORTDIR:\n", noiselevel=-1)
14214                 else:
14215                         writemsg(prefix + "Overlay eclasses override " + \
14216                                 "eclasses from PORTDIR:\n", noiselevel=-1)
14217                 writemsg(prefix + "\n", noiselevel=-1)
14218                 for eclass_name in sorted(eclasses_overridden):
14219                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
14220                                 (eclasses_overridden[eclass_name], eclass_name),
14221                                 noiselevel=-1)
14222                 writemsg(prefix + "\n", noiselevel=-1)
14223                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14224                 "because it will trigger invalidation of cached ebuild metadata " + \
14225                 "that is distributed with the portage tree. If you must " + \
14226                 "override eclasses from PORTDIR then you are advised to add " + \
14227                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14228                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14229                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14230                 "you would like to disable this warning."
14231                 from textwrap import wrap
14232                 for line in wrap(msg, 72):
14233                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14234
14235         if "moo" in myfiles:
14236                 print """
14237
14238   Larry loves Gentoo (""" + platform.system() + """)
14239
14240  _______________________
14241 < Have you mooed today? >
14242  -----------------------
14243         \   ^__^
14244          \  (oo)\_______
14245             (__)\       )\/\ 
14246                 ||----w |
14247                 ||     ||
14248
14249 """
14250
14251         for x in myfiles:
14252                 ext = os.path.splitext(x)[1]
14253                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14254                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14255                         break
14256
14257         root_config = trees[settings["ROOT"]]["root_config"]
14258         if myaction == "list-sets":
14259                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14260                 sys.stdout.flush()
14261                 return os.EX_OK
14262
14263         # only expand sets for actions taking package arguments
14264         oldargs = myfiles[:]
14265         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14266                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14267                 if retval != os.EX_OK:
14268                         return retval
14269
14270                 # Need to handle empty sets specially, otherwise emerge will react 
14271                 # with the help message for empty argument lists
14272                 if oldargs and not myfiles:
14273                         print "emerge: no targets left after set expansion"
14274                         return 0
14275
14276         if ("--tree" in myopts) and ("--columns" in myopts):
14277                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14278                 return 1
14279
14280         if ("--quiet" in myopts):
14281                 spinner.update = spinner.update_quiet
14282                 portage.util.noiselimit = -1
14283
14284         # Always create packages if FEATURES=buildpkg
14285         # Imply --buildpkg if --buildpkgonly
14286         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14287                 if "--buildpkg" not in myopts:
14288                         myopts["--buildpkg"] = True
14289
14290         # Also allow -S to invoke search action (-sS)
14291         if ("--searchdesc" in myopts):
14292                 if myaction and myaction != "search":
14293                         myfiles.append(myaction)
14294                 if "--search" not in myopts:
14295                         myopts["--search"] = True
14296                 myaction = "search"
14297
14298         # Always try and fetch binary packages if FEATURES=getbinpkg
14299         if ("getbinpkg" in settings.features):
14300                 myopts["--getbinpkg"] = True
14301
14302         if "--buildpkgonly" in myopts:
14303                 # --buildpkgonly will not merge anything, so
14304                 # it cancels all binary package options.
14305                 for opt in ("--getbinpkg", "--getbinpkgonly",
14306                         "--usepkg", "--usepkgonly"):
14307                         myopts.pop(opt, None)
14308
14309         if "--fetch-all-uri" in myopts:
14310                 myopts["--fetchonly"] = True
14311
14312         if "--skipfirst" in myopts and "--resume" not in myopts:
14313                 myopts["--resume"] = True
14314
14315         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14316                 myopts["--usepkgonly"] = True
14317
14318         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14319                 myopts["--getbinpkg"] = True
14320
14321         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14322                 myopts["--usepkg"] = True
14323
14324         # Also allow -K to apply --usepkg/-k
14325         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14326                 myopts["--usepkg"] = True
14327
14328         # Allow -p to remove --ask
14329         if ("--pretend" in myopts) and ("--ask" in myopts):
14330                 print ">>> --pretend disables --ask... removing --ask from options."
14331                 del myopts["--ask"]
14332
14333         # forbid --ask when not in a terminal
14334         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14335         if ("--ask" in myopts) and (not sys.stdin.isatty()):
14336                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14337                         noiselevel=-1)
14338                 return 1
14339
14340         if settings.get("PORTAGE_DEBUG", "") == "1":
14341                 spinner.update = spinner.update_quiet
14342                 portage.debug=1
14343                 if "python-trace" in settings.features:
14344                         import portage.debug
14345                         portage.debug.set_trace(True)
14346
14347         if not ("--quiet" in myopts):
14348                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14349                         spinner.update = spinner.update_basic
14350
14351         if "--version" in myopts:
14352                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14353                         settings.profile_path, settings["CHOST"],
14354                         trees[settings["ROOT"]]["vartree"].dbapi)
14355                 return 0
14356         elif "--help" in myopts:
14357                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14358                 return 0
14359
14360         if "--debug" in myopts:
14361                 print "myaction", myaction
14362                 print "myopts", myopts
14363
14364         if not myaction and not myfiles and "--resume" not in myopts:
14365                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14366                 return 1
14367
14368         pretend = "--pretend" in myopts
14369         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14370         buildpkgonly = "--buildpkgonly" in myopts
14371
14372         # check if root user is the current user for the actions where emerge needs this
14373         if portage.secpass < 2:
14374                 # We've already allowed "--version" and "--help" above.
14375                 if "--pretend" not in myopts and myaction not in ("search","info"):
14376                         need_superuser = not \
14377                                 (fetchonly or \
14378                                 (buildpkgonly and secpass >= 1) or \
14379                                 myaction in ("metadata", "regen") or \
14380                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14381                         if portage.secpass < 1 or \
14382                                 need_superuser:
14383                                 if need_superuser:
14384                                         access_desc = "superuser"
14385                                 else:
14386                                         access_desc = "portage group"
14387                                 # Always show portage_group_warning() when only portage group
14388                                 # access is required but the user is not in the portage group.
14389                                 from portage.data import portage_group_warning
14390                                 if "--ask" in myopts:
14391                                         myopts["--pretend"] = True
14392                                         del myopts["--ask"]
14393                                         print ("%s access is required... " + \
14394                                                 "adding --pretend to options.\n") % access_desc
14395                                         if portage.secpass < 1 and not need_superuser:
14396                                                 portage_group_warning()
14397                                 else:
14398                                         sys.stderr.write(("emerge: %s access is " + \
14399                                                 "required.\n\n") % access_desc)
14400                                         if portage.secpass < 1 and not need_superuser:
14401                                                 portage_group_warning()
14402                                         return 1
14403
14404         disable_emergelog = False
14405         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14406                 if x in myopts:
14407                         disable_emergelog = True
14408                         break
14409         if myaction in ("search", "info"):
14410                 disable_emergelog = True
14411         if disable_emergelog:
14412                 """ Disable emergelog for everything except build or unmerge
14413                 operations.  This helps minimize parallel emerge.log entries that can
14414                 confuse log parsers.  We especially want it disabled during
14415                 parallel-fetch, which uses --resume --fetchonly."""
14416                 global emergelog
14417                 def emergelog(*pargs, **kargs):
14418                         pass
14419
14420         if not "--pretend" in myopts:
14421                 emergelog(xterm_titles, "Started emerge on: "+\
14422                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14423                 myelogstr=""
14424                 if myopts:
14425                         myelogstr=" ".join(myopts)
14426                 if myaction:
14427                         myelogstr+=" "+myaction
14428                 if myfiles:
14429                         myelogstr += " " + " ".join(oldargs)
14430                 emergelog(xterm_titles, " *** emerge " + myelogstr)
14431         del oldargs
14432
14433         def emergeexitsig(signum, frame):
14434                 signal.signal(signal.SIGINT, signal.SIG_IGN)
14435                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14436                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14437                 sys.exit(100+signum)
14438         signal.signal(signal.SIGINT, emergeexitsig)
14439         signal.signal(signal.SIGTERM, emergeexitsig)
14440
14441         def emergeexit():
14442                 """This gets out final log message in before we quit."""
14443                 if "--pretend" not in myopts:
14444                         emergelog(xterm_titles, " *** terminating.")
14445                 if "notitles" not in settings.features:
14446                         xtermTitleReset()
14447         portage.atexit_register(emergeexit)
14448
14449         if myaction in ("config", "metadata", "regen", "sync"):
14450                 if "--pretend" in myopts:
14451                         sys.stderr.write(("emerge: The '%s' action does " + \
14452                                 "not support '--pretend'.\n") % myaction)
14453                         return 1
14454
14455         if "sync" == myaction:
14456                 return action_sync(settings, trees, mtimedb, myopts, myaction)
14457         elif "metadata" == myaction:
14458                 action_metadata(settings, portdb, myopts)
14459         elif myaction=="regen":
14460                 validate_ebuild_environment(trees)
14461                 action_regen(settings, portdb, myopts.get("--jobs"),
14462                         myopts.get("--load-average"))
14463         # HELP action
14464         elif "config"==myaction:
14465                 validate_ebuild_environment(trees)
14466                 action_config(settings, trees, myopts, myfiles)
14467
14468         # SEARCH action
14469         elif "search"==myaction:
14470                 validate_ebuild_environment(trees)
14471                 action_search(trees[settings["ROOT"]]["root_config"],
14472                         myopts, myfiles, spinner)
14473         elif myaction in ("clean", "unmerge") or \
14474                 (myaction == "prune" and "--nodeps" in myopts):
14475                 validate_ebuild_environment(trees)
14476
14477                 # Ensure atoms are valid before calling unmerge().
14478                 # For backward compat, leading '=' is not required.
14479                 for x in myfiles:
14480                         if is_valid_package_atom(x) or \
14481                                 is_valid_package_atom("=" + x):
14482                                 continue
14483                         msg = []
14484                         msg.append("'%s' is not a valid package atom." % (x,))
14485                         msg.append("Please check ebuild(5) for full details.")
14486                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14487                                 level=logging.ERROR, noiselevel=-1)
14488                         return 1
14489
14490                 # When given a list of atoms, unmerge
14491                 # them in the order given.
14492                 ordered = myaction == "unmerge"
14493                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14494                         mtimedb["ldpath"], ordered=ordered):
14495                         if not (buildpkgonly or fetchonly or pretend):
14496                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14497
14498         elif myaction in ("depclean", "info", "prune"):
14499
14500                 # Ensure atoms are valid before calling unmerge().
14501                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14502                 valid_atoms = []
14503                 for x in myfiles:
14504                         if is_valid_package_atom(x):
14505                                 try:
14506                                         valid_atoms.append(
14507                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
14508                                 except portage.exception.AmbiguousPackageName, e:
14509                                         msg = "The short ebuild name \"" + x + \
14510                                                 "\" is ambiguous.  Please specify " + \
14511                                                 "one of the following " + \
14512                                                 "fully-qualified ebuild names instead:"
14513                                         for line in textwrap.wrap(msg, 70):
14514                                                 writemsg_level("!!! %s\n" % (line,),
14515                                                         level=logging.ERROR, noiselevel=-1)
14516                                         for i in e[0]:
14517                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
14518                                                         level=logging.ERROR, noiselevel=-1)
14519                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14520                                         return 1
14521                                 continue
14522                         msg = []
14523                         msg.append("'%s' is not a valid package atom." % (x,))
14524                         msg.append("Please check ebuild(5) for full details.")
14525                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14526                                 level=logging.ERROR, noiselevel=-1)
14527                         return 1
14528
14529                 if myaction == "info":
14530                         return action_info(settings, trees, myopts, valid_atoms)
14531
14532                 validate_ebuild_environment(trees)
14533                 action_depclean(settings, trees, mtimedb["ldpath"],
14534                         myopts, myaction, valid_atoms, spinner)
14535                 if not (buildpkgonly or fetchonly or pretend):
14536                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14537         # "update", "system", or just process files:
14538         else:
14539                 validate_ebuild_environment(trees)
14540                 if "--pretend" not in myopts:
14541                         display_news_notification(root_config, myopts)
14542                 retval = action_build(settings, trees, mtimedb,
14543                         myopts, myaction, myfiles, spinner)
14544                 root_config = trees[settings["ROOT"]]["root_config"]
14545                 post_emerge(root_config, myopts, mtimedb, retval)
14546
14547                 return retval