Add support for --rdeps-only and --root-deps options which are useful in
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--rdeps-only",   "--root-deps",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
295
296 def getgccversion(chost):
297         """
298         rtype: C{str}
299         return:  the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of if it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virual matches unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtuals
497                 can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 self.sets = self.setconfig.getSets()
775                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776
777 def create_world_atom(pkg, args_set, root_config):
778         """Create a new atom for the world file if one does not exist.  If the
779         argument atom is precise enough to identify a specific slot then a slot
780         atom will be returned. Atoms that are in the system set may also be stored
781         in world since system atoms can only match one slot while world atoms can
782         be greedy with respect to slots.  Unslotted system packages will not be
783         stored in world."""
784
785         arg_atom = args_set.findAtomForPackage(pkg)
786         if not arg_atom:
787                 return None
788         cp = portage.dep_getkey(arg_atom)
789         new_world_atom = cp
790         sets = root_config.sets
791         portdb = root_config.trees["porttree"].dbapi
792         vardb = root_config.trees["vartree"].dbapi
793         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
794                 for cpv in portdb.match(cp))
795         slotted = len(available_slots) > 1 or \
796                 (len(available_slots) == 1 and "0" not in available_slots)
797         if not slotted:
798                 # check the vdb in case this is multislot
799                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
800                         for cpv in vardb.match(cp))
801                 slotted = len(available_slots) > 1 or \
802                         (len(available_slots) == 1 and "0" not in available_slots)
803         if slotted and arg_atom != cp:
804                 # If the user gave a specific atom, store it as a
805                 # slot atom in the world file.
806                 slot_atom = pkg.slot_atom
807
808                 # For USE=multislot, there are a couple of cases to
809                 # handle here:
810                 #
811                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
812                 #    unknown value, so just record an unslotted atom.
813                 #
814                 # 2) SLOT comes from an installed package and there is no
815                 #    matching SLOT in the portage tree.
816                 #
817                 # Make sure that the slot atom is available in either the
818                 # portdb or the vardb, since otherwise the user certainly
819                 # doesn't want the SLOT atom recorded in the world file
820                 # (case 1 above).  If it's only available in the vardb,
821                 # the user may be trying to prevent a USE=multislot
822                 # package from being removed by --depclean (case 2 above).
823
824                 mydb = portdb
825                 if not portdb.match(slot_atom):
826                         # SLOT seems to come from an installed multislot package
827                         mydb = vardb
828                 # If there is no installed package matching the SLOT atom,
829                 # it probably changed SLOT spontaneously due to USE=multislot,
830                 # so just record an unslotted atom.
831                 if vardb.match(slot_atom):
832                         # Now verify that the argument is precise
833                         # enough to identify a specific slot.
834                         matches = mydb.match(arg_atom)
835                         matched_slots = set()
836                         for cpv in matches:
837                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
838                         if len(matched_slots) == 1:
839                                 new_world_atom = slot_atom
840
841         if new_world_atom == sets["world"].findAtomForPackage(pkg):
842                 # Both atoms would be identical, so there's nothing to add.
843                 return None
844         if not slotted:
845                 # Unlike world atoms, system atoms are not greedy for slots, so they
846                 # can't be safely excluded from world if they are slotted.
847                 system_atom = sets["system"].findAtomForPackage(pkg)
848                 if system_atom:
849                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
850                                 return None
851                         # System virtuals aren't safe to exclude from world since they can
852                         # match multiple old-style virtuals but only one of them will be
853                         # pulled in by update or depclean.
854                         providers = portdb.mysettings.getvirtuals().get(
855                                 portage.dep_getkey(system_atom))
856                         if providers and len(providers) == 1 and providers[0] == cp:
857                                 return None
858         return new_world_atom
859
860 def filter_iuse_defaults(iuse):
861         for flag in iuse:
862                 if flag.startswith("+") or flag.startswith("-"):
863                         yield flag[1:]
864                 else:
865                         yield flag
866
867 class SlotObject(object):
868         __slots__ = ("__weakref__",)
869
870         def __init__(self, **kwargs):
871                 classes = [self.__class__]
872                 while classes:
873                         c = classes.pop()
874                         if c is SlotObject:
875                                 continue
876                         classes.extend(c.__bases__)
877                         slots = getattr(c, "__slots__", None)
878                         if not slots:
879                                 continue
880                         for myattr in slots:
881                                 myvalue = kwargs.get(myattr, None)
882                                 setattr(self, myattr, myvalue)
883
884         def copy(self):
885                 """
886                 Create a new instance and copy all attributes
887                 defined from __slots__ (including those from
888                 inherited classes).
889                 """
890                 obj = self.__class__()
891
892                 classes = [self.__class__]
893                 while classes:
894                         c = classes.pop()
895                         if c is SlotObject:
896                                 continue
897                         classes.extend(c.__bases__)
898                         slots = getattr(c, "__slots__", None)
899                         if not slots:
900                                 continue
901                         for myattr in slots:
902                                 setattr(obj, myattr, getattr(self, myattr))
903
904                 return obj
905
906 class AbstractDepPriority(SlotObject):
907         __slots__ = ("buildtime", "runtime", "runtime_post")
908
909         def __lt__(self, other):
910                 return self.__int__() < other
911
912         def __le__(self, other):
913                 return self.__int__() <= other
914
915         def __eq__(self, other):
916                 return self.__int__() == other
917
918         def __ne__(self, other):
919                 return self.__int__() != other
920
921         def __gt__(self, other):
922                 return self.__int__() > other
923
924         def __ge__(self, other):
925                 return self.__int__() >= other
926
927         def copy(self):
928                 import copy
929                 return copy.copy(self)
930
931 class DepPriority(AbstractDepPriority):
932
933         __slots__ = ("satisfied", "optional", "rebuild")
934
935         def __int__(self):
936                 return 0
937
938         def __str__(self):
939                 if self.optional:
940                         return "optional"
941                 if self.buildtime:
942                         return "buildtime"
943                 if self.runtime:
944                         return "runtime"
945                 if self.runtime_post:
946                         return "runtime_post"
947                 return "soft"
948
949 class BlockerDepPriority(DepPriority):
950         __slots__ = ()
951         def __int__(self):
952                 return 0
953
954         def __str__(self):
955                 return 'blocker'
956
957 BlockerDepPriority.instance = BlockerDepPriority()
958
959 class UnmergeDepPriority(AbstractDepPriority):
960         __slots__ = ("optional", "satisfied",)
961         """
962         Combination of properties           Priority  Category
963
964         runtime                                0       HARD
965         runtime_post                          -1       HARD
966         buildtime                             -2       SOFT
967         (none of the above)                   -2       SOFT
968         """
969
970         MAX    =  0
971         SOFT   = -2
972         MIN    = -2
973
974         def __int__(self):
975                 if self.runtime:
976                         return 0
977                 if self.runtime_post:
978                         return -1
979                 if self.buildtime:
980                         return -2
981                 return -2
982
983         def __str__(self):
984                 myvalue = self.__int__()
985                 if myvalue > self.SOFT:
986                         return "hard"
987                 return "soft"
988
989 class DepPriorityNormalRange(object):
990         """
991         DepPriority properties              Index      Category
992
993         buildtime                                      HARD
994         runtime                                3       MEDIUM
995         runtime_post                           2       MEDIUM_SOFT
996         optional                               1       SOFT
997         (none of the above)                    0       NONE
998         """
999         MEDIUM      = 3
1000         MEDIUM_SOFT = 2
1001         SOFT        = 1
1002         NONE        = 0
1003
1004         @classmethod
1005         def _ignore_optional(cls, priority):
1006                 if priority.__class__ is not DepPriority:
1007                         return False
1008                 return bool(priority.optional)
1009
1010         @classmethod
1011         def _ignore_runtime_post(cls, priority):
1012                 if priority.__class__ is not DepPriority:
1013                         return False
1014                 return bool(priority.optional or priority.runtime_post)
1015
1016         @classmethod
1017         def _ignore_runtime(cls, priority):
1018                 if priority.__class__ is not DepPriority:
1019                         return False
1020                 return not priority.buildtime
1021
1022         ignore_medium      = _ignore_runtime
1023         ignore_medium_soft = _ignore_runtime_post
1024         ignore_soft        = _ignore_optional
1025
1026 DepPriorityNormalRange.ignore_priority = (
1027         None,
1028         DepPriorityNormalRange._ignore_optional,
1029         DepPriorityNormalRange._ignore_runtime_post,
1030         DepPriorityNormalRange._ignore_runtime
1031 )
1032
1033 class DepPrioritySatisfiedRange(object):
1034         """
1035         DepPriority                         Index      Category
1036
1037         not satisfied and buildtime                    HARD
1038         not satisfied and runtime              7       MEDIUM
1039         not satisfied and runtime_post         6       MEDIUM_SOFT
1040         satisfied and buildtime and rebuild    5       SOFT
1041         satisfied and buildtime                4       SOFT
1042         satisfied and runtime                  3       SOFT
1043         satisfied and runtime_post             2       SOFT
1044         optional                               1       SOFT
1045         (none of the above)                    0       NONE
1046         """
1047         MEDIUM      = 7
1048         MEDIUM_SOFT = 6
1049         SOFT        = 5
1050         NONE        = 0
1051
1052         @classmethod
1053         def _ignore_optional(cls, priority):
1054                 if priority.__class__ is not DepPriority:
1055                         return False
1056                 return bool(priority.optional)
1057
1058         @classmethod
1059         def _ignore_satisfied_runtime_post(cls, priority):
1060                 if priority.__class__ is not DepPriority:
1061                         return False
1062                 if priority.optional:
1063                         return True
1064                 if not priority.satisfied:
1065                         return False
1066                 return bool(priority.runtime_post)
1067
1068         @classmethod
1069         def _ignore_satisfied_runtime(cls, priority):
1070                 if priority.__class__ is not DepPriority:
1071                         return False
1072                 if priority.optional:
1073                         return True
1074                 if not priority.satisfied:
1075                         return False
1076                 return not priority.buildtime
1077
1078         @classmethod
1079         def _ignore_satisfied_buildtime(cls, priority):
1080                 if priority.__class__ is not DepPriority:
1081                         return False
1082                 if priority.optional:
1083                         return True
1084                 if not priority.satisfied:
1085                         return False
1086                 if priority.buildtime:
1087                         return not priority.rebuild
1088                 return True
1089
1090         @classmethod
1091         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1092                 if priority.__class__ is not DepPriority:
1093                         return False
1094                 if priority.optional:
1095                         return True
1096                 return bool(priority.satisfied)
1097
1098         @classmethod
1099         def _ignore_runtime_post(cls, priority):
1100                 if priority.__class__ is not DepPriority:
1101                         return False
1102                 return bool(priority.optional or \
1103                         priority.satisfied or \
1104                         priority.runtime_post)
1105
1106         @classmethod
1107         def _ignore_runtime(cls, priority):
1108                 if priority.__class__ is not DepPriority:
1109                         return False
1110                 return bool(priority.satisfied or \
1111                         not priority.buildtime)
1112
1113         ignore_medium      = _ignore_runtime
1114         ignore_medium_soft = _ignore_runtime_post
1115         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1116
1117 DepPrioritySatisfiedRange.ignore_priority = (
1118         None,
1119         DepPrioritySatisfiedRange._ignore_optional,
1120         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1121         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1122         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1123         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1124         DepPrioritySatisfiedRange._ignore_runtime_post,
1125         DepPrioritySatisfiedRange._ignore_runtime
1126 )
1127
1128 def _find_deep_system_runtime_deps(graph):
1129         deep_system_deps = set()
1130         node_stack = []
1131         for node in graph:
1132                 if not isinstance(node, Package) or \
1133                         node.operation == 'uninstall':
1134                         continue
1135                 if node.root_config.sets['system'].findAtomForPackage(node):
1136                         node_stack.append(node)
1137
1138         def ignore_priority(priority):
1139                 """
1140                 Ignore non-runtime priorities.
1141                 """
1142                 if isinstance(priority, DepPriority) and \
1143                         (priority.runtime or priority.runtime_post):
1144                         return False
1145                 return True
1146
1147         while node_stack:
1148                 node = node_stack.pop()
1149                 if node in deep_system_deps:
1150                         continue
1151                 deep_system_deps.add(node)
1152                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1153                         if not isinstance(child, Package) or \
1154                                 child.operation == 'uninstall':
1155                                 continue
1156                         node_stack.append(child)
1157
1158         return deep_system_deps
1159
1160 class FakeVartree(portage.vartree):
1161         """This is implements an in-memory copy of a vartree instance that provides
1162         all the interfaces required for use by the depgraph.  The vardb is locked
1163         during the constructor call just long enough to read a copy of the
1164         installed package information.  This allows the depgraph to do it's
1165         dependency calculations without holding a lock on the vardb.  It also
1166         allows things like vardb global updates to be done in memory so that the
1167         user doesn't necessarily need write access to the vardb in cases where
1168         global updates are necessary (updates are performed when necessary if there
1169         is not a matching ebuild in the tree)."""
1170         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1171                 self._root_config = root_config
1172                 if pkg_cache is None:
1173                         pkg_cache = {}
1174                 real_vartree = root_config.trees["vartree"]
1175                 portdb = root_config.trees["porttree"].dbapi
1176                 self.root = real_vartree.root
1177                 self.settings = real_vartree.settings
1178                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1179                 if "_mtime_" not in mykeys:
1180                         mykeys.append("_mtime_")
1181                 self._db_keys = mykeys
1182                 self._pkg_cache = pkg_cache
1183                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1184                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1185                 try:
1186                         # At least the parent needs to exist for the lock file.
1187                         portage.util.ensure_dirs(vdb_path)
1188                 except portage.exception.PortageException:
1189                         pass
1190                 vdb_lock = None
1191                 try:
1192                         if acquire_lock and os.access(vdb_path, os.W_OK):
1193                                 vdb_lock = portage.locks.lockdir(vdb_path)
1194                         real_dbapi = real_vartree.dbapi
1195                         slot_counters = {}
1196                         for cpv in real_dbapi.cpv_all():
1197                                 cache_key = ("installed", self.root, cpv, "nomerge")
1198                                 pkg = self._pkg_cache.get(cache_key)
1199                                 if pkg is not None:
1200                                         metadata = pkg.metadata
1201                                 else:
1202                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1203                                 myslot = metadata["SLOT"]
1204                                 mycp = portage.dep_getkey(cpv)
1205                                 myslot_atom = "%s:%s" % (mycp, myslot)
1206                                 try:
1207                                         mycounter = long(metadata["COUNTER"])
1208                                 except ValueError:
1209                                         mycounter = 0
1210                                         metadata["COUNTER"] = str(mycounter)
1211                                 other_counter = slot_counters.get(myslot_atom, None)
1212                                 if other_counter is not None:
1213                                         if other_counter > mycounter:
1214                                                 continue
1215                                 slot_counters[myslot_atom] = mycounter
1216                                 if pkg is None:
1217                                         pkg = Package(built=True, cpv=cpv,
1218                                                 installed=True, metadata=metadata,
1219                                                 root_config=root_config, type_name="installed")
1220                                 self._pkg_cache[pkg] = pkg
1221                                 self.dbapi.cpv_inject(pkg)
1222                         real_dbapi.flush_cache()
1223                 finally:
1224                         if vdb_lock:
1225                                 portage.locks.unlockdir(vdb_lock)
1226                 # Populate the old-style virtuals using the cached values.
1227                 if not self.settings.treeVirtuals:
1228                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1229                                 portage.getCPFromCPV, self.get_all_provides())
1230
1231                 # Intialize variables needed for lazy cache pulls of the live ebuild
1232                 # metadata.  This ensures that the vardb lock is released ASAP, without
1233                 # being delayed in case cache generation is triggered.
1234                 self._aux_get = self.dbapi.aux_get
1235                 self.dbapi.aux_get = self._aux_get_wrapper
1236                 self._match = self.dbapi.match
1237                 self.dbapi.match = self._match_wrapper
1238                 self._aux_get_history = set()
1239                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1240                 self._portdb = portdb
1241                 self._global_updates = None
1242
1243         def _match_wrapper(self, cpv, use_cache=1):
1244                 """
1245                 Make sure the metadata in Package instances gets updated for any
1246                 cpv that is returned from a match() call, since the metadata can
1247                 be accessed directly from the Package instance instead of via
1248                 aux_get().
1249                 """
1250                 matches = self._match(cpv, use_cache=use_cache)
1251                 for cpv in matches:
1252                         if cpv in self._aux_get_history:
1253                                 continue
1254                         self._aux_get_wrapper(cpv, [])
1255                 return matches
1256
1257         def _aux_get_wrapper(self, pkg, wants):
1258                 if pkg in self._aux_get_history:
1259                         return self._aux_get(pkg, wants)
1260                 self._aux_get_history.add(pkg)
1261                 try:
1262                         # Use the live ebuild metadata if possible.
1263                         live_metadata = dict(izip(self._portdb_keys,
1264                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1265                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1266                                 raise KeyError(pkg)
1267                         self.dbapi.aux_update(pkg, live_metadata)
1268                 except (KeyError, portage.exception.PortageException):
1269                         if self._global_updates is None:
1270                                 self._global_updates = \
1271                                         grab_global_updates(self._portdb.porttree_root)
1272                         perform_global_updates(
1273                                 pkg, self.dbapi, self._global_updates)
1274                 return self._aux_get(pkg, wants)
1275
1276         def sync(self, acquire_lock=1):
1277                 """
1278                 Call this method to synchronize state with the real vardb
1279                 after one or more packages may have been installed or
1280                 uninstalled.
1281                 """
1282                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1283                 try:
1284                         # At least the parent needs to exist for the lock file.
1285                         portage.util.ensure_dirs(vdb_path)
1286                 except portage.exception.PortageException:
1287                         pass
1288                 vdb_lock = None
1289                 try:
1290                         if acquire_lock and os.access(vdb_path, os.W_OK):
1291                                 vdb_lock = portage.locks.lockdir(vdb_path)
1292                         self._sync()
1293                 finally:
1294                         if vdb_lock:
1295                                 portage.locks.unlockdir(vdb_lock)
1296
1297         def _sync(self):
1298
1299                 real_vardb = self._root_config.trees["vartree"].dbapi
1300                 current_cpv_set = frozenset(real_vardb.cpv_all())
1301                 pkg_vardb = self.dbapi
1302                 aux_get_history = self._aux_get_history
1303
1304                 # Remove any packages that have been uninstalled.
1305                 for pkg in list(pkg_vardb):
1306                         if pkg.cpv not in current_cpv_set:
1307                                 pkg_vardb.cpv_remove(pkg)
1308                                 aux_get_history.discard(pkg.cpv)
1309
1310                 # Validate counters and timestamps.
1311                 slot_counters = {}
1312                 root = self.root
1313                 validation_keys = ["COUNTER", "_mtime_"]
1314                 for cpv in current_cpv_set:
1315
1316                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1317                         pkg = pkg_vardb.get(pkg_hash_key)
1318                         if pkg is not None:
1319                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1320                                 try:
1321                                         counter = long(counter)
1322                                 except ValueError:
1323                                         counter = 0
1324
1325                                 if counter != pkg.counter or \
1326                                         mtime != pkg.mtime:
1327                                         pkg_vardb.cpv_remove(pkg)
1328                                         aux_get_history.discard(pkg.cpv)
1329                                         pkg = None
1330
1331                         if pkg is None:
1332                                 pkg = self._pkg(cpv)
1333
1334                         other_counter = slot_counters.get(pkg.slot_atom)
1335                         if other_counter is not None:
1336                                 if other_counter > pkg.counter:
1337                                         continue
1338
1339                         slot_counters[pkg.slot_atom] = pkg.counter
1340                         pkg_vardb.cpv_inject(pkg)
1341
1342                 real_vardb.flush_cache()
1343
1344         def _pkg(self, cpv):
1345                 root_config = self._root_config
1346                 real_vardb = root_config.trees["vartree"].dbapi
1347                 pkg = Package(cpv=cpv, installed=True,
1348                         metadata=izip(self._db_keys,
1349                         real_vardb.aux_get(cpv, self._db_keys)),
1350                         root_config=root_config,
1351                         type_name="installed")
1352
1353                 try:
1354                         mycounter = long(pkg.metadata["COUNTER"])
1355                 except ValueError:
1356                         mycounter = 0
1357                         pkg.metadata["COUNTER"] = str(mycounter)
1358
1359                 return pkg
1360
1361 def grab_global_updates(portdir):
1362         from portage.update import grab_updates, parse_updates
1363         updpath = os.path.join(portdir, "profiles", "updates")
1364         try:
1365                 rawupdates = grab_updates(updpath)
1366         except portage.exception.DirectoryNotFound:
1367                 rawupdates = []
1368         upd_commands = []
1369         for mykey, mystat, mycontent in rawupdates:
1370                 commands, errors = parse_updates(mycontent)
1371                 upd_commands.extend(commands)
1372         return upd_commands
1373
1374 def perform_global_updates(mycpv, mydb, mycommands):
1375         from portage.update import update_dbentries
1376         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1377         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1378         updates = update_dbentries(mycommands, aux_dict)
1379         if updates:
1380                 mydb.aux_update(mycpv, updates)
1381
1382 def visible(pkgsettings, pkg):
1383         """
1384         Check if a package is visible. This can raise an InvalidDependString
1385         exception if LICENSE is invalid.
1386         TODO: optionally generate a list of masking reasons
1387         @rtype: Boolean
1388         @returns: True if the package is visible, False otherwise.
1389         """
1390         if not pkg.metadata["SLOT"]:
1391                 return False
1392         if not pkg.installed:
1393                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1394                         return False
1395         eapi = pkg.metadata["EAPI"]
1396         if not portage.eapi_is_supported(eapi):
1397                 return False
1398         if not pkg.installed:
1399                 if portage._eapi_is_deprecated(eapi):
1400                         return False
1401                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1402                         return False
1403         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         try:
1408                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1409                         return False
1410         except portage.exception.InvalidDependString:
1411                 return False
1412         return True
1413
1414 def get_masking_status(pkg, pkgsettings, root_config):
1415
1416         mreasons = portage.getmaskingstatus(
1417                 pkg, settings=pkgsettings,
1418                 portdb=root_config.trees["porttree"].dbapi)
1419
1420         if not pkg.installed:
1421                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1422                         mreasons.append("CHOST: %s" % \
1423                                 pkg.metadata["CHOST"])
1424
1425         if not pkg.metadata["SLOT"]:
1426                 mreasons.append("invalid: SLOT is undefined")
1427
1428         return mreasons
1429
1430 def get_mask_info(root_config, cpv, pkgsettings,
1431         db, pkg_type, built, installed, db_keys):
1432         eapi_masked = False
1433         try:
1434                 metadata = dict(izip(db_keys,
1435                         db.aux_get(cpv, db_keys)))
1436         except KeyError:
1437                 metadata = None
1438         if metadata and not built:
1439                 pkgsettings.setcpv(cpv, mydb=metadata)
1440                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1441                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1442         if metadata is None:
1443                 mreasons = ["corruption"]
1444         else:
1445                 eapi = metadata['EAPI']
1446                 if eapi[:1] == '-':
1447                         eapi = eapi[1:]
1448                 if not portage.eapi_is_supported(eapi):
1449                         mreasons = ['EAPI %s' % eapi]
1450                 else:
1451                         pkg = Package(type_name=pkg_type, root_config=root_config,
1452                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1453                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1454         return metadata, mreasons
1455
1456 def show_masked_packages(masked_packages):
1457         shown_licenses = set()
1458         shown_comments = set()
1459         # Maybe there is both an ebuild and a binary. Only
1460         # show one of them to avoid redundant appearance.
1461         shown_cpvs = set()
1462         have_eapi_mask = False
1463         for (root_config, pkgsettings, cpv,
1464                 metadata, mreasons) in masked_packages:
1465                 if cpv in shown_cpvs:
1466                         continue
1467                 shown_cpvs.add(cpv)
1468                 comment, filename = None, None
1469                 if "package.mask" in mreasons:
1470                         comment, filename = \
1471                                 portage.getmaskingreason(
1472                                 cpv, metadata=metadata,
1473                                 settings=pkgsettings,
1474                                 portdb=root_config.trees["porttree"].dbapi,
1475                                 return_location=True)
1476                 missing_licenses = []
1477                 if metadata:
1478                         if not portage.eapi_is_supported(metadata["EAPI"]):
1479                                 have_eapi_mask = True
1480                         try:
1481                                 missing_licenses = \
1482                                         pkgsettings._getMissingLicenses(
1483                                                 cpv, metadata)
1484                         except portage.exception.InvalidDependString:
1485                                 # This will have already been reported
1486                                 # above via mreasons.
1487                                 pass
1488
1489                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1490                 if comment and comment not in shown_comments:
1491                         print filename+":"
1492                         print comment
1493                         shown_comments.add(comment)
1494                 portdb = root_config.trees["porttree"].dbapi
1495                 for l in missing_licenses:
1496                         l_path = portdb.findLicensePath(l)
1497                         if l in shown_licenses:
1498                                 continue
1499                         msg = ("A copy of the '%s' license" + \
1500                         " is located at '%s'.") % (l, l_path)
1501                         print msg
1502                         print
1503                         shown_licenses.add(l)
1504         return have_eapi_mask
1505
1506 class Task(SlotObject):
1507         __slots__ = ("_hash_key", "_hash_value")
1508
1509         def _get_hash_key(self):
1510                 hash_key = getattr(self, "_hash_key", None)
1511                 if hash_key is None:
1512                         raise NotImplementedError(self)
1513                 return hash_key
1514
1515         def __eq__(self, other):
1516                 return self._get_hash_key() == other
1517
1518         def __ne__(self, other):
1519                 return self._get_hash_key() != other
1520
1521         def __hash__(self):
1522                 hash_value = getattr(self, "_hash_value", None)
1523                 if hash_value is None:
1524                         self._hash_value = hash(self._get_hash_key())
1525                 return self._hash_value
1526
1527         def __len__(self):
1528                 return len(self._get_hash_key())
1529
1530         def __getitem__(self, key):
1531                 return self._get_hash_key()[key]
1532
1533         def __iter__(self):
1534                 return iter(self._get_hash_key())
1535
1536         def __contains__(self, key):
1537                 return key in self._get_hash_key()
1538
1539         def __str__(self):
1540                 return str(self._get_hash_key())
1541
1542 class Blocker(Task):
1543
1544         __hash__ = Task.__hash__
1545         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1546
1547         def __init__(self, **kwargs):
1548                 Task.__init__(self, **kwargs)
1549                 self.cp = portage.dep_getkey(self.atom)
1550
1551         def _get_hash_key(self):
1552                 hash_key = getattr(self, "_hash_key", None)
1553                 if hash_key is None:
1554                         self._hash_key = \
1555                                 ("blocks", self.root, self.atom, self.eapi)
1556                 return self._hash_key
1557
1558 class Package(Task):
1559
1560         __hash__ = Task.__hash__
1561         __slots__ = ("built", "cpv", "depth",
1562                 "installed", "metadata", "onlydeps", "operation",
1563                 "root_config", "type_name",
1564                 "category", "counter", "cp", "cpv_split",
1565                 "inherited", "iuse", "mtime",
1566                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1567
1568         metadata_keys = [
1569                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1570                 "INHERITED", "IUSE", "KEYWORDS",
1571                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1572                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1573
1574         def __init__(self, **kwargs):
1575                 Task.__init__(self, **kwargs)
1576                 self.root = self.root_config.root
1577                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1578                 self.cp = portage.cpv_getkey(self.cpv)
1579                 slot = self.slot
1580                 if not slot:
1581                         # Avoid an InvalidAtom exception when creating slot_atom.
1582                         # This package instance will be masked due to empty SLOT.
1583                         slot = '0'
1584                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1585                 self.category, self.pf = portage.catsplit(self.cpv)
1586                 self.cpv_split = portage.catpkgsplit(self.cpv)
1587                 self.pv_split = self.cpv_split[1:]
1588
1589         class _use(object):
1590
1591                 __slots__ = ("__weakref__", "enabled")
1592
1593                 def __init__(self, use):
1594                         self.enabled = frozenset(use)
1595
1596         class _iuse(object):
1597
1598                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1599
1600                 def __init__(self, tokens, iuse_implicit):
1601                         self.tokens = tuple(tokens)
1602                         self.iuse_implicit = iuse_implicit
1603                         enabled = []
1604                         disabled = []
1605                         other = []
1606                         for x in tokens:
1607                                 prefix = x[:1]
1608                                 if prefix == "+":
1609                                         enabled.append(x[1:])
1610                                 elif prefix == "-":
1611                                         disabled.append(x[1:])
1612                                 else:
1613                                         other.append(x)
1614                         self.enabled = frozenset(enabled)
1615                         self.disabled = frozenset(disabled)
1616                         self.all = frozenset(chain(enabled, disabled, other))
1617
1618                 def __getattribute__(self, name):
1619                         if name == "regex":
1620                                 try:
1621                                         return object.__getattribute__(self, "regex")
1622                                 except AttributeError:
1623                                         all = object.__getattribute__(self, "all")
1624                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1625                                         # Escape anything except ".*" which is supposed
1626                                         # to pass through from _get_implicit_iuse()
1627                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1628                                         regex = "^(%s)$" % "|".join(regex)
1629                                         regex = regex.replace("\\.\\*", ".*")
1630                                         self.regex = re.compile(regex)
1631                         return object.__getattribute__(self, name)
1632
1633         def _get_hash_key(self):
1634                 hash_key = getattr(self, "_hash_key", None)
1635                 if hash_key is None:
1636                         if self.operation is None:
1637                                 self.operation = "merge"
1638                                 if self.onlydeps or self.installed:
1639                                         self.operation = "nomerge"
1640                         self._hash_key = \
1641                                 (self.type_name, self.root, self.cpv, self.operation)
1642                 return self._hash_key
1643
1644         def __lt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1648                         return True
1649                 return False
1650
1651         def __le__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1655                         return True
1656                 return False
1657
1658         def __gt__(self, other):
1659                 if other.cp != self.cp:
1660                         return False
1661                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1662                         return True
1663                 return False
1664
1665         def __ge__(self, other):
1666                 if other.cp != self.cp:
1667                         return False
1668                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1669                         return True
1670                 return False
1671
1672 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1673         if not x.startswith("UNUSED_"))
1674 _all_metadata_keys.discard("CDEPEND")
1675 _all_metadata_keys.update(Package.metadata_keys)
1676
1677 from portage.cache.mappings import slot_dict_class
1678 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1679
1680 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1681         """
1682         Detect metadata updates and synchronize Package attributes.
1683         """
1684
1685         __slots__ = ("_pkg",)
1686         _wrapped_keys = frozenset(
1687                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1688
1689         def __init__(self, pkg, metadata):
1690                 _PackageMetadataWrapperBase.__init__(self)
1691                 self._pkg = pkg
1692                 self.update(metadata)
1693
1694         def __setitem__(self, k, v):
1695                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1696                 if k in self._wrapped_keys:
1697                         getattr(self, "_set_" + k.lower())(k, v)
1698
1699         def _set_inherited(self, k, v):
1700                 if isinstance(v, basestring):
1701                         v = frozenset(v.split())
1702                 self._pkg.inherited = v
1703
1704         def _set_iuse(self, k, v):
1705                 self._pkg.iuse = self._pkg._iuse(
1706                         v.split(), self._pkg.root_config.iuse_implicit)
1707
1708         def _set_slot(self, k, v):
1709                 self._pkg.slot = v
1710
1711         def _set_use(self, k, v):
1712                 self._pkg.use = self._pkg._use(v.split())
1713
1714         def _set_counter(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.counter = v
1721
1722         def _set__mtime_(self, k, v):
1723                 if isinstance(v, basestring):
1724                         try:
1725                                 v = long(v.strip())
1726                         except ValueError:
1727                                 v = 0
1728                 self._pkg.mtime = v
1729
1730 class EbuildFetchonly(SlotObject):
1731
1732         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1733
1734         def execute(self):
1735                 settings = self.settings
1736                 pkg = self.pkg
1737                 portdb = pkg.root_config.trees["porttree"].dbapi
1738                 ebuild_path = portdb.findname(pkg.cpv)
1739                 settings.setcpv(pkg)
1740                 debug = settings.get("PORTAGE_DEBUG") == "1"
1741                 use_cache = 1 # always true
1742                 portage.doebuild_environment(ebuild_path, "fetch",
1743                         settings["ROOT"], settings, debug, use_cache, portdb)
1744                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1745
1746                 if restrict_fetch:
1747                         rval = self._execute_with_builddir()
1748                 else:
1749                         rval = portage.doebuild(ebuild_path, "fetch",
1750                                 settings["ROOT"], settings, debug=debug,
1751                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1752                                 mydbapi=portdb, tree="porttree")
1753
1754                         if rval != os.EX_OK:
1755                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1756                                 eerror(msg, phase="unpack", key=pkg.cpv)
1757
1758                 return rval
1759
1760         def _execute_with_builddir(self):
1761                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1762                 # ensuring sane $PWD (bug #239560) and storing elog
1763                 # messages. Use a private temp directory, in order
1764                 # to avoid locking the main one.
1765                 settings = self.settings
1766                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1767                 from tempfile import mkdtemp
1768                 try:
1769                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1770                 except OSError, e:
1771                         if e.errno != portage.exception.PermissionDenied.errno:
1772                                 raise
1773                         raise portage.exception.PermissionDenied(global_tmpdir)
1774                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1775                 settings.backup_changes("PORTAGE_TMPDIR")
1776                 try:
1777                         retval = self._execute()
1778                 finally:
1779                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1780                         settings.backup_changes("PORTAGE_TMPDIR")
1781                         shutil.rmtree(private_tmpdir)
1782                 return retval
1783
1784         def _execute(self):
1785                 settings = self.settings
1786                 pkg = self.pkg
1787                 root_config = pkg.root_config
1788                 portdb = root_config.trees["porttree"].dbapi
1789                 ebuild_path = portdb.findname(pkg.cpv)
1790                 debug = settings.get("PORTAGE_DEBUG") == "1"
1791                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1792
1793                 retval = portage.doebuild(ebuild_path, "fetch",
1794                         self.settings["ROOT"], self.settings, debug=debug,
1795                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1796                         mydbapi=portdb, tree="porttree")
1797
1798                 if retval != os.EX_OK:
1799                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1800                         eerror(msg, phase="unpack", key=pkg.cpv)
1801
1802                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1803                 return retval
1804
1805 class PollConstants(object):
1806
1807         """
1808         Provides POLL* constants that are equivalent to those from the
1809         select module, for use by PollSelectAdapter.
1810         """
1811
1812         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1813         v = 1
1814         for k in names:
1815                 locals()[k] = getattr(select, k, v)
1816                 v *= 2
1817         del k, v
1818
1819 class AsynchronousTask(SlotObject):
1820         """
1821         Subclasses override _wait() and _poll() so that calls
1822         to public methods can be wrapped for implementing
1823         hooks such as exit listener notification.
1824
1825         Sublasses should call self.wait() to notify exit listeners after
1826         the task is complete and self.returncode has been set.
1827         """
1828
1829         __slots__ = ("background", "cancelled", "returncode") + \
1830                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1831
1832         def start(self):
1833                 """
1834                 Start an asynchronous task and then return as soon as possible.
1835                 """
1836                 self._start_hook()
1837                 self._start()
1838
1839         def _start(self):
1840                 raise NotImplementedError(self)
1841
1842         def isAlive(self):
1843                 return self.returncode is None
1844
1845         def poll(self):
1846                 self._wait_hook()
1847                 return self._poll()
1848
1849         def _poll(self):
1850                 return self.returncode
1851
1852         def wait(self):
1853                 if self.returncode is None:
1854                         self._wait()
1855                 self._wait_hook()
1856                 return self.returncode
1857
1858         def _wait(self):
1859                 return self.returncode
1860
1861         def cancel(self):
1862                 self.cancelled = True
1863                 self.wait()
1864
1865         def addStartListener(self, f):
1866                 """
1867                 The function will be called with one argument, a reference to self.
1868                 """
1869                 if self._start_listeners is None:
1870                         self._start_listeners = []
1871                 self._start_listeners.append(f)
1872
1873         def removeStartListener(self, f):
1874                 if self._start_listeners is None:
1875                         return
1876                 self._start_listeners.remove(f)
1877
1878         def _start_hook(self):
1879                 if self._start_listeners is not None:
1880                         start_listeners = self._start_listeners
1881                         self._start_listeners = None
1882
1883                         for f in start_listeners:
1884                                 f(self)
1885
1886         def addExitListener(self, f):
1887                 """
1888                 The function will be called with one argument, a reference to self.
1889                 """
1890                 if self._exit_listeners is None:
1891                         self._exit_listeners = []
1892                 self._exit_listeners.append(f)
1893
1894         def removeExitListener(self, f):
1895                 if self._exit_listeners is None:
1896                         if self._exit_listener_stack is not None:
1897                                 self._exit_listener_stack.remove(f)
1898                         return
1899                 self._exit_listeners.remove(f)
1900
1901         def _wait_hook(self):
1902                 """
1903                 Call this method after the task completes, just before returning
1904                 the returncode from wait() or poll(). This hook is
1905                 used to trigger exit listeners when the returncode first
1906                 becomes available.
1907                 """
1908                 if self.returncode is not None and \
1909                         self._exit_listeners is not None:
1910
1911                         # This prevents recursion, in case one of the
1912                         # exit handlers triggers this method again by
1913                         # calling wait(). Use a stack that gives
1914                         # removeExitListener() an opportunity to consume
1915                         # listeners from the stack, before they can get
1916                         # called below. This is necessary because a call
1917                         # to one exit listener may result in a call to
1918                         # removeExitListener() for another listener on
1919                         # the stack. That listener needs to be removed
1920                         # from the stack since it would be inconsistent
1921                         # to call it after it has been been passed into
1922                         # removeExitListener().
1923                         self._exit_listener_stack = self._exit_listeners
1924                         self._exit_listeners = None
1925
1926                         self._exit_listener_stack.reverse()
1927                         while self._exit_listener_stack:
1928                                 self._exit_listener_stack.pop()(self)
1929
1930 class AbstractPollTask(AsynchronousTask):
1931
1932         __slots__ = ("scheduler",) + \
1933                 ("_registered",)
1934
1935         _bufsize = 4096
1936         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1937         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1938                 _exceptional_events
1939
1940         def _unregister(self):
1941                 raise NotImplementedError(self)
1942
1943         def _unregister_if_appropriate(self, event):
1944                 if self._registered:
1945                         if event & self._exceptional_events:
1946                                 self._unregister()
1947                                 self.cancel()
1948                         elif event & PollConstants.POLLHUP:
1949                                 self._unregister()
1950                                 self.wait()
1951
1952 class PipeReader(AbstractPollTask):
1953
1954         """
1955         Reads output from one or more files and saves it in memory,
1956         for retrieval via the getvalue() method. This is driven by
1957         the scheduler's poll() loop, so it runs entirely within the
1958         current process.
1959         """
1960
1961         __slots__ = ("input_files",) + \
1962                 ("_read_data", "_reg_ids")
1963
1964         def _start(self):
1965                 self._reg_ids = set()
1966                 self._read_data = []
1967                 for k, f in self.input_files.iteritems():
1968                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1969                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1970                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1971                                 self._registered_events, self._output_handler))
1972                 self._registered = True
1973
1974         def isAlive(self):
1975                 return self._registered
1976
1977         def cancel(self):
1978                 if self.returncode is None:
1979                         self.returncode = 1
1980                         self.cancelled = True
1981                 self.wait()
1982
1983         def _wait(self):
1984                 if self.returncode is not None:
1985                         return self.returncode
1986
1987                 if self._registered:
1988                         self.scheduler.schedule(self._reg_ids)
1989                         self._unregister()
1990
1991                 self.returncode = os.EX_OK
1992                 return self.returncode
1993
1994         def getvalue(self):
1995                 """Retrieve the entire contents"""
1996                 if sys.hexversion >= 0x3000000:
1997                         return bytes().join(self._read_data)
1998                 return "".join(self._read_data)
1999
2000         def close(self):
2001                 """Free the memory buffer."""
2002                 self._read_data = None
2003
2004         def _output_handler(self, fd, event):
2005
2006                 if event & PollConstants.POLLIN:
2007
2008                         for f in self.input_files.itervalues():
2009                                 if fd == f.fileno():
2010                                         break
2011
2012                         buf = array.array('B')
2013                         try:
2014                                 buf.fromfile(f, self._bufsize)
2015                         except EOFError:
2016                                 pass
2017
2018                         if buf:
2019                                 self._read_data.append(buf.tostring())
2020                         else:
2021                                 self._unregister()
2022                                 self.wait()
2023
2024                 self._unregister_if_appropriate(event)
2025                 return self._registered
2026
2027         def _unregister(self):
2028                 """
2029                 Unregister from the scheduler and close open files.
2030                 """
2031
2032                 self._registered = False
2033
2034                 if self._reg_ids is not None:
2035                         for reg_id in self._reg_ids:
2036                                 self.scheduler.unregister(reg_id)
2037                         self._reg_ids = None
2038
2039                 if self.input_files is not None:
2040                         for f in self.input_files.itervalues():
2041                                 f.close()
2042                         self.input_files = None
2043
2044 class CompositeTask(AsynchronousTask):
2045
2046         __slots__ = ("scheduler",) + ("_current_task",)
2047
2048         def isAlive(self):
2049                 return self._current_task is not None
2050
2051         def cancel(self):
2052                 self.cancelled = True
2053                 if self._current_task is not None:
2054                         self._current_task.cancel()
2055
2056         def _poll(self):
2057                 """
2058                 This does a loop calling self._current_task.poll()
2059                 repeatedly as long as the value of self._current_task
2060                 keeps changing. It calls poll() a maximum of one time
2061                 for a given self._current_task instance. This is useful
2062                 since calling poll() on a task can trigger advance to
2063                 the next task could eventually lead to the returncode
2064                 being set in cases when polling only a single task would
2065                 not have the same effect.
2066                 """
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None or task is prev:
2072                                 # don't poll the same task more than once
2073                                 break
2074                         task.poll()
2075                         prev = task
2076
2077                 return self.returncode
2078
2079         def _wait(self):
2080
2081                 prev = None
2082                 while True:
2083                         task = self._current_task
2084                         if task is None:
2085                                 # don't wait for the same task more than once
2086                                 break
2087                         if task is prev:
2088                                 # Before the task.wait() method returned, an exit
2089                                 # listener should have set self._current_task to either
2090                                 # a different task or None. Something is wrong.
2091                                 raise AssertionError("self._current_task has not " + \
2092                                         "changed since calling wait", self, task)
2093                         task.wait()
2094                         prev = task
2095
2096                 return self.returncode
2097
2098         def _assert_current(self, task):
2099                 """
2100                 Raises an AssertionError if the given task is not the
2101                 same one as self._current_task. This can be useful
2102                 for detecting bugs.
2103                 """
2104                 if task is not self._current_task:
2105                         raise AssertionError("Unrecognized task: %s" % (task,))
2106
2107         def _default_exit(self, task):
2108                 """
2109                 Calls _assert_current() on the given task and then sets the
2110                 composite returncode attribute if task.returncode != os.EX_OK.
2111                 If the task failed then self._current_task will be set to None.
2112                 Subclasses can use this as a generic task exit callback.
2113
2114                 @rtype: int
2115                 @returns: The task.returncode attribute.
2116                 """
2117                 self._assert_current(task)
2118                 if task.returncode != os.EX_OK:
2119                         self.returncode = task.returncode
2120                         self._current_task = None
2121                 return task.returncode
2122
2123         def _final_exit(self, task):
2124                 """
2125                 Assumes that task is the final task of this composite task.
2126                 Calls _default_exit() and sets self.returncode to the task's
2127                 returncode and sets self._current_task to None.
2128                 """
2129                 self._default_exit(task)
2130                 self._current_task = None
2131                 self.returncode = task.returncode
2132                 return self.returncode
2133
2134         def _default_final_exit(self, task):
2135                 """
2136                 This calls _final_exit() and then wait().
2137
2138                 Subclasses can use this as a generic final task exit callback.
2139
2140                 """
2141                 self._final_exit(task)
2142                 return self.wait()
2143
2144         def _start_task(self, task, exit_handler):
2145                 """
2146                 Register exit handler for the given task, set it
2147                 as self._current_task, and call task.start().
2148
2149                 Subclasses can use this as a generic way to start
2150                 a task.
2151
2152                 """
2153                 task.addExitListener(exit_handler)
2154                 self._current_task = task
2155                 task.start()
2156
2157 class TaskSequence(CompositeTask):
2158         """
2159         A collection of tasks that executes sequentially. Each task
2160         must have a addExitListener() method that can be used as
2161         a means to trigger movement from one task to the next.
2162         """
2163
2164         __slots__ = ("_task_queue",)
2165
2166         def __init__(self, **kwargs):
2167                 AsynchronousTask.__init__(self, **kwargs)
2168                 self._task_queue = deque()
2169
2170         def add(self, task):
2171                 self._task_queue.append(task)
2172
2173         def _start(self):
2174                 self._start_next_task()
2175
2176         def cancel(self):
2177                 self._task_queue.clear()
2178                 CompositeTask.cancel(self)
2179
2180         def _start_next_task(self):
2181                 self._start_task(self._task_queue.popleft(),
2182                         self._task_exit_handler)
2183
2184         def _task_exit_handler(self, task):
2185                 if self._default_exit(task) != os.EX_OK:
2186                         self.wait()
2187                 elif self._task_queue:
2188                         self._start_next_task()
2189                 else:
2190                         self._final_exit(task)
2191                         self.wait()
2192
2193 class SubProcess(AbstractPollTask):
2194
2195         __slots__ = ("pid",) + \
2196                 ("_files", "_reg_id")
2197
2198         # A file descriptor is required for the scheduler to monitor changes from
2199         # inside a poll() loop. When logging is not enabled, create a pipe just to
2200         # serve this purpose alone.
2201         _dummy_pipe_fd = 9
2202
2203         def _poll(self):
2204                 if self.returncode is not None:
2205                         return self.returncode
2206                 if self.pid is None:
2207                         return self.returncode
2208                 if self._registered:
2209                         return self.returncode
2210
2211                 try:
2212                         retval = os.waitpid(self.pid, os.WNOHANG)
2213                 except OSError, e:
2214                         if e.errno != errno.ECHILD:
2215                                 raise
2216                         del e
2217                         retval = (self.pid, 1)
2218
2219                 if retval == (0, 0):
2220                         return None
2221                 self._set_returncode(retval)
2222                 return self.returncode
2223
2224         def cancel(self):
2225                 if self.isAlive():
2226                         try:
2227                                 os.kill(self.pid, signal.SIGTERM)
2228                         except OSError, e:
2229                                 if e.errno != errno.ESRCH:
2230                                         raise
2231                                 del e
2232
2233                 self.cancelled = True
2234                 if self.pid is not None:
2235                         self.wait()
2236                 return self.returncode
2237
2238         def isAlive(self):
2239                 return self.pid is not None and \
2240                         self.returncode is None
2241
2242         def _wait(self):
2243
2244                 if self.returncode is not None:
2245                         return self.returncode
2246
2247                 if self._registered:
2248                         self.scheduler.schedule(self._reg_id)
2249                         self._unregister()
2250                         if self.returncode is not None:
2251                                 return self.returncode
2252
2253                 try:
2254                         wait_retval = os.waitpid(self.pid, 0)
2255                 except OSError, e:
2256                         if e.errno != errno.ECHILD:
2257                                 raise
2258                         del e
2259                         self._set_returncode((self.pid, 1))
2260                 else:
2261                         self._set_returncode(wait_retval)
2262
2263                 return self.returncode
2264
2265         def _unregister(self):
2266                 """
2267                 Unregister from the scheduler and close open files.
2268                 """
2269
2270                 self._registered = False
2271
2272                 if self._reg_id is not None:
2273                         self.scheduler.unregister(self._reg_id)
2274                         self._reg_id = None
2275
2276                 if self._files is not None:
2277                         for f in self._files.itervalues():
2278                                 f.close()
2279                         self._files = None
2280
2281         def _set_returncode(self, wait_retval):
2282
2283                 retval = wait_retval[1]
2284
2285                 if retval != os.EX_OK:
2286                         if retval & 0xff:
2287                                 retval = (retval & 0xff) << 8
2288                         else:
2289                                 retval = retval >> 8
2290
2291                 self.returncode = retval
2292
2293 class SpawnProcess(SubProcess):
2294
2295         """
2296         Constructor keyword args are passed into portage.process.spawn().
2297         The required "args" keyword argument will be passed as the first
2298         spawn() argument.
2299         """
2300
2301         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2302                 "uid", "gid", "groups", "umask", "logfile",
2303                 "path_lookup", "pre_exec")
2304
2305         __slots__ = ("args",) + \
2306                 _spawn_kwarg_names
2307
2308         _file_names = ("log", "process", "stdout")
2309         _files_dict = slot_dict_class(_file_names, prefix="")
2310
2311         def _start(self):
2312
2313                 if self.cancelled:
2314                         return
2315
2316                 if self.fd_pipes is None:
2317                         self.fd_pipes = {}
2318                 fd_pipes = self.fd_pipes
2319                 fd_pipes.setdefault(0, sys.stdin.fileno())
2320                 fd_pipes.setdefault(1, sys.stdout.fileno())
2321                 fd_pipes.setdefault(2, sys.stderr.fileno())
2322
2323                 # flush any pending output
2324                 for fd in fd_pipes.itervalues():
2325                         if fd == sys.stdout.fileno():
2326                                 sys.stdout.flush()
2327                         if fd == sys.stderr.fileno():
2328                                 sys.stderr.flush()
2329
2330                 logfile = self.logfile
2331                 self._files = self._files_dict()
2332                 files = self._files
2333
2334                 master_fd, slave_fd = self._pipe(fd_pipes)
2335                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2336                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2337
2338                 null_input = None
2339                 fd_pipes_orig = fd_pipes.copy()
2340                 if self.background:
2341                         # TODO: Use job control functions like tcsetpgrp() to control
2342                         # access to stdin. Until then, use /dev/null so that any
2343                         # attempts to read from stdin will immediately return EOF
2344                         # instead of blocking indefinitely.
2345                         null_input = open('/dev/null', 'rb')
2346                         fd_pipes[0] = null_input.fileno()
2347                 else:
2348                         fd_pipes[0] = fd_pipes_orig[0]
2349
2350                 files.process = os.fdopen(master_fd, 'rb')
2351                 if logfile is not None:
2352
2353                         fd_pipes[1] = slave_fd
2354                         fd_pipes[2] = slave_fd
2355
2356                         files.log = open(logfile, mode='ab')
2357                         portage.util.apply_secpass_permissions(logfile,
2358                                 uid=portage.portage_uid, gid=portage.portage_gid,
2359                                 mode=0660)
2360
2361                         if not self.background:
2362                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2363
2364                         output_handler = self._output_handler
2365
2366                 else:
2367
2368                         # Create a dummy pipe so the scheduler can monitor
2369                         # the process from inside a poll() loop.
2370                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2371                         if self.background:
2372                                 fd_pipes[1] = slave_fd
2373                                 fd_pipes[2] = slave_fd
2374                         output_handler = self._dummy_handler
2375
2376                 kwargs = {}
2377                 for k in self._spawn_kwarg_names:
2378                         v = getattr(self, k)
2379                         if v is not None:
2380                                 kwargs[k] = v
2381
2382                 kwargs["fd_pipes"] = fd_pipes
2383                 kwargs["returnpid"] = True
2384                 kwargs.pop("logfile", None)
2385
2386                 self._reg_id = self.scheduler.register(files.process.fileno(),
2387                         self._registered_events, output_handler)
2388                 self._registered = True
2389
2390                 retval = self._spawn(self.args, **kwargs)
2391
2392                 os.close(slave_fd)
2393                 if null_input is not None:
2394                         null_input.close()
2395
2396                 if isinstance(retval, int):
2397                         # spawn failed
2398                         self._unregister()
2399                         self.returncode = retval
2400                         self.wait()
2401                         return
2402
2403                 self.pid = retval[0]
2404                 portage.process.spawned_pids.remove(self.pid)
2405
2406         def _pipe(self, fd_pipes):
2407                 """
2408                 @type fd_pipes: dict
2409                 @param fd_pipes: pipes from which to copy terminal size if desired.
2410                 """
2411                 return os.pipe()
2412
2413         def _spawn(self, args, **kwargs):
2414                 return portage.process.spawn(args, **kwargs)
2415
2416         def _output_handler(self, fd, event):
2417
2418                 if event & PollConstants.POLLIN:
2419
2420                         files = self._files
2421                         buf = array.array('B')
2422                         try:
2423                                 buf.fromfile(files.process, self._bufsize)
2424                         except EOFError:
2425                                 pass
2426
2427                         if buf:
2428                                 if not self.background:
2429                                         buf.tofile(files.stdout)
2430                                         files.stdout.flush()
2431                                 buf.tofile(files.log)
2432                                 files.log.flush()
2433                         else:
2434                                 self._unregister()
2435                                 self.wait()
2436
2437                 self._unregister_if_appropriate(event)
2438                 return self._registered
2439
2440         def _dummy_handler(self, fd, event):
2441                 """
2442                 This method is mainly interested in detecting EOF, since
2443                 the only purpose of the pipe is to allow the scheduler to
2444                 monitor the process from inside a poll() loop.
2445                 """
2446
2447                 if event & PollConstants.POLLIN:
2448
2449                         buf = array.array('B')
2450                         try:
2451                                 buf.fromfile(self._files.process, self._bufsize)
2452                         except EOFError:
2453                                 pass
2454
2455                         if buf:
2456                                 pass
2457                         else:
2458                                 self._unregister()
2459                                 self.wait()
2460
2461                 self._unregister_if_appropriate(event)
2462                 return self._registered
2463
2464 class MiscFunctionsProcess(SpawnProcess):
2465         """
2466         Spawns misc-functions.sh with an existing ebuild environment.
2467         """
2468
2469         __slots__ = ("commands", "phase", "pkg", "settings")
2470
2471         def _start(self):
2472                 settings = self.settings
2473                 settings.pop("EBUILD_PHASE", None)
2474                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2475                 misc_sh_binary = os.path.join(portage_bin_path,
2476                         os.path.basename(portage.const.MISC_SH_BINARY))
2477
2478                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2479                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2480
2481                 portage._doebuild_exit_status_unlink(
2482                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2483
2484                 SpawnProcess._start(self)
2485
2486         def _spawn(self, args, **kwargs):
2487                 settings = self.settings
2488                 debug = settings.get("PORTAGE_DEBUG") == "1"
2489                 return portage.spawn(" ".join(args), settings,
2490                         debug=debug, **kwargs)
2491
2492         def _set_returncode(self, wait_retval):
2493                 SpawnProcess._set_returncode(self, wait_retval)
2494                 self.returncode = portage._doebuild_exit_status_check_and_log(
2495                         self.settings, self.phase, self.returncode)
2496
2497 class EbuildFetcher(SpawnProcess):
2498
2499         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2500                 ("_build_dir",)
2501
2502         def _start(self):
2503
2504                 root_config = self.pkg.root_config
2505                 portdb = root_config.trees["porttree"].dbapi
2506                 ebuild_path = portdb.findname(self.pkg.cpv)
2507                 settings = self.config_pool.allocate()
2508                 settings.setcpv(self.pkg)
2509
2510                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2511                 # should not be touched since otherwise it could interfere with
2512                 # another instance of the same cpv concurrently being built for a
2513                 # different $ROOT (currently, builds only cooperate with prefetchers
2514                 # that are spawned for the same $ROOT).
2515                 if not self.prefetch:
2516                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2517                         self._build_dir.lock()
2518                         self._build_dir.clean_log()
2519                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2520                         if self.logfile is None:
2521                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2522
2523                 phase = "fetch"
2524                 if self.fetchall:
2525                         phase = "fetchall"
2526
2527                 # If any incremental variables have been overridden
2528                 # via the environment, those values need to be passed
2529                 # along here so that they are correctly considered by
2530                 # the config instance in the subproccess.
2531                 fetch_env = os.environ.copy()
2532
2533                 nocolor = settings.get("NOCOLOR")
2534                 if nocolor is not None:
2535                         fetch_env["NOCOLOR"] = nocolor
2536
2537                 fetch_env["PORTAGE_NICENESS"] = "0"
2538                 if self.prefetch:
2539                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2540
2541                 ebuild_binary = os.path.join(
2542                         settings["PORTAGE_BIN_PATH"], "ebuild")
2543
2544                 fetch_args = [ebuild_binary, ebuild_path, phase]
2545                 debug = settings.get("PORTAGE_DEBUG") == "1"
2546                 if debug:
2547                         fetch_args.append("--debug")
2548
2549                 self.args = fetch_args
2550                 self.env = fetch_env
2551                 SpawnProcess._start(self)
2552
2553         def _pipe(self, fd_pipes):
2554                 """When appropriate, use a pty so that fetcher progress bars,
2555                 like wget has, will work properly."""
2556                 if self.background or not sys.stdout.isatty():
2557                         # When the output only goes to a log file,
2558                         # there's no point in creating a pty.
2559                         return os.pipe()
2560                 stdout_pipe = fd_pipes.get(1)
2561                 got_pty, master_fd, slave_fd = \
2562                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2563                 return (master_fd, slave_fd)
2564
2565         def _set_returncode(self, wait_retval):
2566                 SpawnProcess._set_returncode(self, wait_retval)
2567                 # Collect elog messages that might have been
2568                 # created by the pkg_nofetch phase.
2569                 if self._build_dir is not None:
2570                         # Skip elog messages for prefetch, in order to avoid duplicates.
2571                         if not self.prefetch and self.returncode != os.EX_OK:
2572                                 elog_out = None
2573                                 if self.logfile is not None:
2574                                         if self.background:
2575                                                 elog_out = open(self.logfile, 'a')
2576                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2577                                 if self.logfile is not None:
2578                                         msg += ", Log file:"
2579                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2580                                 if self.logfile is not None:
2581                                         eerror(" '%s'" % (self.logfile,),
2582                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2583                                 if elog_out is not None:
2584                                         elog_out.close()
2585                         if not self.prefetch:
2586                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2587                         features = self._build_dir.settings.features
2588                         if self.returncode == os.EX_OK:
2589                                 self._build_dir.clean_log()
2590                         self._build_dir.unlock()
2591                         self.config_pool.deallocate(self._build_dir.settings)
2592                         self._build_dir = None
2593
2594 class EbuildBuildDir(SlotObject):
2595
2596         __slots__ = ("dir_path", "pkg", "settings",
2597                 "locked", "_catdir", "_lock_obj")
2598
2599         def __init__(self, **kwargs):
2600                 SlotObject.__init__(self, **kwargs)
2601                 self.locked = False
2602
2603         def lock(self):
2604                 """
2605                 This raises an AlreadyLocked exception if lock() is called
2606                 while a lock is already held. In order to avoid this, call
2607                 unlock() or check whether the "locked" attribute is True
2608                 or False before calling lock().
2609                 """
2610                 if self._lock_obj is not None:
2611                         raise self.AlreadyLocked((self._lock_obj,))
2612
2613                 dir_path = self.dir_path
2614                 if dir_path is None:
2615                         root_config = self.pkg.root_config
2616                         portdb = root_config.trees["porttree"].dbapi
2617                         ebuild_path = portdb.findname(self.pkg.cpv)
2618                         settings = self.settings
2619                         settings.setcpv(self.pkg)
2620                         debug = settings.get("PORTAGE_DEBUG") == "1"
2621                         use_cache = 1 # always true
2622                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2623                                 self.settings, debug, use_cache, portdb)
2624                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2625
2626                 catdir = os.path.dirname(dir_path)
2627                 self._catdir = catdir
2628
2629                 portage.util.ensure_dirs(os.path.dirname(catdir),
2630                         gid=portage.portage_gid,
2631                         mode=070, mask=0)
2632                 catdir_lock = None
2633                 try:
2634                         catdir_lock = portage.locks.lockdir(catdir)
2635                         portage.util.ensure_dirs(catdir,
2636                                 gid=portage.portage_gid,
2637                                 mode=070, mask=0)
2638                         self._lock_obj = portage.locks.lockdir(dir_path)
2639                 finally:
2640                         self.locked = self._lock_obj is not None
2641                         if catdir_lock is not None:
2642                                 portage.locks.unlockdir(catdir_lock)
2643
2644         def clean_log(self):
2645                 """Discard existing log."""
2646                 settings = self.settings
2647
2648                 for x in ('.logid', 'temp/build.log'):
2649                         try:
2650                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2651                         except OSError:
2652                                 pass
2653
2654         def unlock(self):
2655                 if self._lock_obj is None:
2656                         return
2657
2658                 portage.locks.unlockdir(self._lock_obj)
2659                 self._lock_obj = None
2660                 self.locked = False
2661
2662                 catdir = self._catdir
2663                 catdir_lock = None
2664                 try:
2665                         catdir_lock = portage.locks.lockdir(catdir)
2666                 finally:
2667                         if catdir_lock:
2668                                 try:
2669                                         os.rmdir(catdir)
2670                                 except OSError, e:
2671                                         if e.errno not in (errno.ENOENT,
2672                                                 errno.ENOTEMPTY, errno.EEXIST):
2673                                                 raise
2674                                         del e
2675                                 portage.locks.unlockdir(catdir_lock)
2676
2677         class AlreadyLocked(portage.exception.PortageException):
2678                 pass
2679
2680 class EbuildBuild(CompositeTask):
2681
2682         __slots__ = ("args_set", "config_pool", "find_blockers",
2683                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2684                 "prefetcher", "settings", "world_atom") + \
2685                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2686
2687         def _start(self):
2688
2689                 logger = self.logger
2690                 opts = self.opts
2691                 pkg = self.pkg
2692                 settings = self.settings
2693                 world_atom = self.world_atom
2694                 root_config = pkg.root_config
2695                 tree = "porttree"
2696                 self._tree = tree
2697                 portdb = root_config.trees[tree].dbapi
2698                 settings.setcpv(pkg)
2699                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2700                 ebuild_path = portdb.findname(self.pkg.cpv)
2701                 self._ebuild_path = ebuild_path
2702
2703                 prefetcher = self.prefetcher
2704                 if prefetcher is None:
2705                         pass
2706                 elif not prefetcher.isAlive():
2707                         prefetcher.cancel()
2708                 elif prefetcher.poll() is None:
2709
2710                         waiting_msg = "Fetching files " + \
2711                                 "in the background. " + \
2712                                 "To view fetch progress, run `tail -f " + \
2713                                 "/var/log/emerge-fetch.log` in another " + \
2714                                 "terminal."
2715                         msg_prefix = colorize("GOOD", " * ")
2716                         from textwrap import wrap
2717                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2718                                 for line in wrap(waiting_msg, 65))
2719                         if not self.background:
2720                                 writemsg(waiting_msg, noiselevel=-1)
2721
2722                         self._current_task = prefetcher
2723                         prefetcher.addExitListener(self._prefetch_exit)
2724                         return
2725
2726                 self._prefetch_exit(prefetcher)
2727
2728         def _prefetch_exit(self, prefetcher):
2729
2730                 opts = self.opts
2731                 pkg = self.pkg
2732                 settings = self.settings
2733
2734                 if opts.fetchonly:
2735                                 fetcher = EbuildFetchonly(
2736                                         fetch_all=opts.fetch_all_uri,
2737                                         pkg=pkg, pretend=opts.pretend,
2738                                         settings=settings)
2739                                 retval = fetcher.execute()
2740                                 self.returncode = retval
2741                                 self.wait()
2742                                 return
2743
2744                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2745                         fetchall=opts.fetch_all_uri,
2746                         fetchonly=opts.fetchonly,
2747                         background=self.background,
2748                         pkg=pkg, scheduler=self.scheduler)
2749
2750                 self._start_task(fetcher, self._fetch_exit)
2751
2752         def _fetch_exit(self, fetcher):
2753                 opts = self.opts
2754                 pkg = self.pkg
2755
2756                 fetch_failed = False
2757                 if opts.fetchonly:
2758                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2759                 else:
2760                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2761
2762                 if fetch_failed and fetcher.logfile is not None and \
2763                         os.path.exists(fetcher.logfile):
2764                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2765
2766                 if not fetch_failed and fetcher.logfile is not None:
2767                         # Fetch was successful, so remove the fetch log.
2768                         try:
2769                                 os.unlink(fetcher.logfile)
2770                         except OSError:
2771                                 pass
2772
2773                 if fetch_failed or opts.fetchonly:
2774                         self.wait()
2775                         return
2776
2777                 logger = self.logger
2778                 opts = self.opts
2779                 pkg_count = self.pkg_count
2780                 scheduler = self.scheduler
2781                 settings = self.settings
2782                 features = settings.features
2783                 ebuild_path = self._ebuild_path
2784                 system_set = pkg.root_config.sets["system"]
2785
2786                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2787                 self._build_dir.lock()
2788
2789                 # Cleaning is triggered before the setup
2790                 # phase, in portage.doebuild().
2791                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2792                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2793                 short_msg = "emerge: (%s of %s) %s Clean" % \
2794                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2795                 logger.log(msg, short_msg=short_msg)
2796
2797                 #buildsyspkg: Check if we need to _force_ binary package creation
2798                 self._issyspkg = "buildsyspkg" in features and \
2799                                 system_set.findAtomForPackage(pkg) and \
2800                                 not opts.buildpkg
2801
2802                 if opts.buildpkg or self._issyspkg:
2803
2804                         self._buildpkg = True
2805
2806                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2807                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2808                         short_msg = "emerge: (%s of %s) %s Compile" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2810                         logger.log(msg, short_msg=short_msg)
2811
2812                 else:
2813                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2814                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2815                         short_msg = "emerge: (%s of %s) %s Compile" % \
2816                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2817                         logger.log(msg, short_msg=short_msg)
2818
2819                 build = EbuildExecuter(background=self.background, pkg=pkg,
2820                         scheduler=scheduler, settings=settings)
2821                 self._start_task(build, self._build_exit)
2822
2823         def _unlock_builddir(self):
2824                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2825                 self._build_dir.unlock()
2826
2827         def _build_exit(self, build):
2828                 if self._default_exit(build) != os.EX_OK:
2829                         self._unlock_builddir()
2830                         self.wait()
2831                         return
2832
2833                 opts = self.opts
2834                 buildpkg = self._buildpkg
2835
2836                 if not buildpkg:
2837                         self._final_exit(build)
2838                         self.wait()
2839                         return
2840
2841                 if self._issyspkg:
2842                         msg = ">>> This is a system package, " + \
2843                                 "let's pack a rescue tarball.\n"
2844
2845                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2846                         if log_path is not None:
2847                                 log_file = open(log_path, 'a')
2848                                 try:
2849                                         log_file.write(msg)
2850                                 finally:
2851                                         log_file.close()
2852
2853                         if not self.background:
2854                                 portage.writemsg_stdout(msg, noiselevel=-1)
2855
2856                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2857                         scheduler=self.scheduler, settings=self.settings)
2858
2859                 self._start_task(packager, self._buildpkg_exit)
2860
2861         def _buildpkg_exit(self, packager):
2862                 """
2863                 Released build dir lock when there is a failure or
2864                 when in buildpkgonly mode. Otherwise, the lock will
2865                 be released when merge() is called.
2866                 """
2867
2868                 if self._default_exit(packager) != os.EX_OK:
2869                         self._unlock_builddir()
2870                         self.wait()
2871                         return
2872
2873                 if self.opts.buildpkgonly:
2874                         # Need to call "clean" phase for buildpkgonly mode
2875                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2876                         phase = "clean"
2877                         clean_phase = EbuildPhase(background=self.background,
2878                                 pkg=self.pkg, phase=phase,
2879                                 scheduler=self.scheduler, settings=self.settings,
2880                                 tree=self._tree)
2881                         self._start_task(clean_phase, self._clean_exit)
2882                         return
2883
2884                 # Continue holding the builddir lock until
2885                 # after the package has been installed.
2886                 self._current_task = None
2887                 self.returncode = packager.returncode
2888                 self.wait()
2889
2890         def _clean_exit(self, clean_phase):
2891                 if self._final_exit(clean_phase) != os.EX_OK or \
2892                         self.opts.buildpkgonly:
2893                         self._unlock_builddir()
2894                 self.wait()
2895
2896         def install(self):
2897                 """
2898                 Install the package and then clean up and release locks.
2899                 Only call this after the build has completed successfully
2900                 and neither fetchonly nor buildpkgonly mode are enabled.
2901                 """
2902
2903                 find_blockers = self.find_blockers
2904                 ldpath_mtimes = self.ldpath_mtimes
2905                 logger = self.logger
2906                 pkg = self.pkg
2907                 pkg_count = self.pkg_count
2908                 settings = self.settings
2909                 world_atom = self.world_atom
2910                 ebuild_path = self._ebuild_path
2911                 tree = self._tree
2912
2913                 merge = EbuildMerge(find_blockers=self.find_blockers,
2914                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2915                         pkg_count=pkg_count, pkg_path=ebuild_path,
2916                         scheduler=self.scheduler,
2917                         settings=settings, tree=tree, world_atom=world_atom)
2918
2919                 msg = " === (%s of %s) Merging (%s::%s)" % \
2920                         (pkg_count.curval, pkg_count.maxval,
2921                         pkg.cpv, ebuild_path)
2922                 short_msg = "emerge: (%s of %s) %s Merge" % \
2923                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2924                 logger.log(msg, short_msg=short_msg)
2925
2926                 try:
2927                         rval = merge.execute()
2928                 finally:
2929                         self._unlock_builddir()
2930
2931                 return rval
2932
2933 class EbuildExecuter(CompositeTask):
2934
2935         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2936
2937         _phases = ("prepare", "configure", "compile", "test", "install")
2938
2939         _live_eclasses = frozenset([
2940                 "bzr",
2941                 "cvs",
2942                 "darcs",
2943                 "git",
2944                 "mercurial",
2945                 "subversion"
2946         ])
2947
2948         def _start(self):
2949                 self._tree = "porttree"
2950                 pkg = self.pkg
2951                 phase = "clean"
2952                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2953                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2954                 self._start_task(clean_phase, self._clean_phase_exit)
2955
2956         def _clean_phase_exit(self, clean_phase):
2957
2958                 if self._default_exit(clean_phase) != os.EX_OK:
2959                         self.wait()
2960                         return
2961
2962                 pkg = self.pkg
2963                 scheduler = self.scheduler
2964                 settings = self.settings
2965                 cleanup = 1
2966
2967                 # This initializes PORTAGE_LOG_FILE.
2968                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2969
2970                 setup_phase = EbuildPhase(background=self.background,
2971                         pkg=pkg, phase="setup", scheduler=scheduler,
2972                         settings=settings, tree=self._tree)
2973
2974                 setup_phase.addExitListener(self._setup_exit)
2975                 self._current_task = setup_phase
2976                 self.scheduler.scheduleSetup(setup_phase)
2977
2978         def _setup_exit(self, setup_phase):
2979
2980                 if self._default_exit(setup_phase) != os.EX_OK:
2981                         self.wait()
2982                         return
2983
2984                 unpack_phase = EbuildPhase(background=self.background,
2985                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2986                         settings=self.settings, tree=self._tree)
2987
2988                 if self._live_eclasses.intersection(self.pkg.inherited):
2989                         # Serialize $DISTDIR access for live ebuilds since
2990                         # otherwise they can interfere with eachother.
2991
2992                         unpack_phase.addExitListener(self._unpack_exit)
2993                         self._current_task = unpack_phase
2994                         self.scheduler.scheduleUnpack(unpack_phase)
2995
2996                 else:
2997                         self._start_task(unpack_phase, self._unpack_exit)
2998
2999         def _unpack_exit(self, unpack_phase):
3000
3001                 if self._default_exit(unpack_phase) != os.EX_OK:
3002                         self.wait()
3003                         return
3004
3005                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3006
3007                 pkg = self.pkg
3008                 phases = self._phases
3009                 eapi = pkg.metadata["EAPI"]
3010                 if eapi in ("0", "1"):
3011                         # skip src_prepare and src_configure
3012                         phases = phases[2:]
3013
3014                 for phase in phases:
3015                         ebuild_phases.add(EbuildPhase(background=self.background,
3016                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3017                                 settings=self.settings, tree=self._tree))
3018
3019                 self._start_task(ebuild_phases, self._default_final_exit)
3020
3021 class EbuildMetadataPhase(SubProcess):
3022
3023         """
3024         Asynchronous interface for the ebuild "depend" phase which is
3025         used to extract metadata from the ebuild.
3026         """
3027
3028         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3029                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3030                 ("_raw_metadata",)
3031
3032         _file_names = ("ebuild",)
3033         _files_dict = slot_dict_class(_file_names, prefix="")
3034         _metadata_fd = 9
3035
3036         def _start(self):
3037                 settings = self.settings
3038                 settings.setcpv(self.cpv)
3039                 ebuild_path = self.ebuild_path
3040
3041                 eapi = None
3042                 if 'parse-eapi-glep-55' in settings.features:
3043                         pf, eapi = portage._split_ebuild_name_glep55(
3044                                 os.path.basename(ebuild_path))
3045                 if eapi is None and \
3046                         'parse-eapi-ebuild-head' in settings.features:
3047                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3048                                 mode='r', encoding='utf_8', errors='replace'))
3049
3050                 if eapi is not None:
3051                         if not portage.eapi_is_supported(eapi):
3052                                 self.metadata_callback(self.cpv, self.ebuild_path,
3053                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3054                                 self.returncode = os.EX_OK
3055                                 self.wait()
3056                                 return
3057
3058                         settings.configdict['pkg']['EAPI'] = eapi
3059
3060                 debug = settings.get("PORTAGE_DEBUG") == "1"
3061                 master_fd = None
3062                 slave_fd = None
3063                 fd_pipes = None
3064                 if self.fd_pipes is not None:
3065                         fd_pipes = self.fd_pipes.copy()
3066                 else:
3067                         fd_pipes = {}
3068
3069                 fd_pipes.setdefault(0, sys.stdin.fileno())
3070                 fd_pipes.setdefault(1, sys.stdout.fileno())
3071                 fd_pipes.setdefault(2, sys.stderr.fileno())
3072
3073                 # flush any pending output
3074                 for fd in fd_pipes.itervalues():
3075                         if fd == sys.stdout.fileno():
3076                                 sys.stdout.flush()
3077                         if fd == sys.stderr.fileno():
3078                                 sys.stderr.flush()
3079
3080                 fd_pipes_orig = fd_pipes.copy()
3081                 self._files = self._files_dict()
3082                 files = self._files
3083
3084                 master_fd, slave_fd = os.pipe()
3085                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3086                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3087
3088                 fd_pipes[self._metadata_fd] = slave_fd
3089
3090                 self._raw_metadata = []
3091                 files.ebuild = os.fdopen(master_fd, 'r')
3092                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3093                         self._registered_events, self._output_handler)
3094                 self._registered = True
3095
3096                 retval = portage.doebuild(ebuild_path, "depend",
3097                         settings["ROOT"], settings, debug,
3098                         mydbapi=self.portdb, tree="porttree",
3099                         fd_pipes=fd_pipes, returnpid=True)
3100
3101                 os.close(slave_fd)
3102
3103                 if isinstance(retval, int):
3104                         # doebuild failed before spawning
3105                         self._unregister()
3106                         self.returncode = retval
3107                         self.wait()
3108                         return
3109
3110                 self.pid = retval[0]
3111                 portage.process.spawned_pids.remove(self.pid)
3112
3113         def _output_handler(self, fd, event):
3114
3115                 if event & PollConstants.POLLIN:
3116                         self._raw_metadata.append(self._files.ebuild.read())
3117                         if not self._raw_metadata[-1]:
3118                                 self._unregister()
3119                                 self.wait()
3120
3121                 self._unregister_if_appropriate(event)
3122                 return self._registered
3123
3124         def _set_returncode(self, wait_retval):
3125                 SubProcess._set_returncode(self, wait_retval)
3126                 if self.returncode == os.EX_OK:
3127                         metadata_lines = "".join(self._raw_metadata).splitlines()
3128                         if len(portage.auxdbkeys) != len(metadata_lines):
3129                                 # Don't trust bash's returncode if the
3130                                 # number of lines is incorrect.
3131                                 self.returncode = 1
3132                         else:
3133                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3134                                 self.metadata_callback(self.cpv, self.ebuild_path,
3135                                         self.repo_path, metadata, self.ebuild_mtime)
3136
3137 class EbuildProcess(SpawnProcess):
3138
3139         __slots__ = ("phase", "pkg", "settings", "tree")
3140
3141         def _start(self):
3142                 # Don't open the log file during the clean phase since the
3143                 # open file can result in an nfs lock on $T/build.log which
3144                 # prevents the clean phase from removing $T.
3145                 if self.phase not in ("clean", "cleanrm"):
3146                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3147                 SpawnProcess._start(self)
3148
3149         def _pipe(self, fd_pipes):
3150                 stdout_pipe = fd_pipes.get(1)
3151                 got_pty, master_fd, slave_fd = \
3152                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3153                 return (master_fd, slave_fd)
3154
3155         def _spawn(self, args, **kwargs):
3156
3157                 root_config = self.pkg.root_config
3158                 tree = self.tree
3159                 mydbapi = root_config.trees[tree].dbapi
3160                 settings = self.settings
3161                 ebuild_path = settings["EBUILD"]
3162                 debug = settings.get("PORTAGE_DEBUG") == "1"
3163
3164                 rval = portage.doebuild(ebuild_path, self.phase,
3165                         root_config.root, settings, debug,
3166                         mydbapi=mydbapi, tree=tree, **kwargs)
3167
3168                 return rval
3169
3170         def _set_returncode(self, wait_retval):
3171                 SpawnProcess._set_returncode(self, wait_retval)
3172
3173                 if self.phase not in ("clean", "cleanrm"):
3174                         self.returncode = portage._doebuild_exit_status_check_and_log(
3175                                 self.settings, self.phase, self.returncode)
3176
3177                 if self.phase == "test" and self.returncode != os.EX_OK and \
3178                         "test-fail-continue" in self.settings.features:
3179                         self.returncode = os.EX_OK
3180
3181                 portage._post_phase_userpriv_perms(self.settings)
3182
3183 class EbuildPhase(CompositeTask):
3184
3185         __slots__ = ("background", "pkg", "phase",
3186                 "scheduler", "settings", "tree")
3187
3188         _post_phase_cmds = portage._post_phase_cmds
3189
3190         def _start(self):
3191
3192                 ebuild_process = EbuildProcess(background=self.background,
3193                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3194                         settings=self.settings, tree=self.tree)
3195
3196                 self._start_task(ebuild_process, self._ebuild_exit)
3197
3198         def _ebuild_exit(self, ebuild_process):
3199
3200                 if self.phase == "install":
3201                         out = None
3202                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3203                         log_file = None
3204                         if self.background and log_path is not None:
3205                                 log_file = open(log_path, 'a')
3206                                 out = log_file
3207                         try:
3208                                 portage._check_build_log(self.settings, out=out)
3209                         finally:
3210                                 if log_file is not None:
3211                                         log_file.close()
3212
3213                 if self._default_exit(ebuild_process) != os.EX_OK:
3214                         self.wait()
3215                         return
3216
3217                 settings = self.settings
3218
3219                 if self.phase == "install":
3220                         portage._post_src_install_chost_fix(settings)
3221                         portage._post_src_install_uid_fix(settings)
3222
3223                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3224                 if post_phase_cmds is not None:
3225                         post_phase = MiscFunctionsProcess(background=self.background,
3226                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3227                                 scheduler=self.scheduler, settings=settings)
3228                         self._start_task(post_phase, self._post_phase_exit)
3229                         return
3230
3231                 self.returncode = ebuild_process.returncode
3232                 self._current_task = None
3233                 self.wait()
3234
3235         def _post_phase_exit(self, post_phase):
3236                 if self._final_exit(post_phase) != os.EX_OK:
3237                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3238                                 noiselevel=-1)
3239                 self._current_task = None
3240                 self.wait()
3241                 return
3242
3243 class EbuildBinpkg(EbuildProcess):
3244         """
3245         This assumes that src_install() has successfully completed.
3246         """
3247         __slots__ = ("_binpkg_tmpfile",)
3248
3249         def _start(self):
3250                 self.phase = "package"
3251                 self.tree = "porttree"
3252                 pkg = self.pkg
3253                 root_config = pkg.root_config
3254                 portdb = root_config.trees["porttree"].dbapi
3255                 bintree = root_config.trees["bintree"]
3256                 ebuild_path = portdb.findname(self.pkg.cpv)
3257                 settings = self.settings
3258                 debug = settings.get("PORTAGE_DEBUG") == "1"
3259
3260                 bintree.prevent_collision(pkg.cpv)
3261                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3262                         pkg.cpv + ".tbz2." + str(os.getpid()))
3263                 self._binpkg_tmpfile = binpkg_tmpfile
3264                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3265                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3266
3267                 try:
3268                         EbuildProcess._start(self)
3269                 finally:
3270                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3271
3272         def _set_returncode(self, wait_retval):
3273                 EbuildProcess._set_returncode(self, wait_retval)
3274
3275                 pkg = self.pkg
3276                 bintree = pkg.root_config.trees["bintree"]
3277                 binpkg_tmpfile = self._binpkg_tmpfile
3278                 if self.returncode == os.EX_OK:
3279                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3280
3281 class EbuildMerge(SlotObject):
3282
3283         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3284                 "pkg", "pkg_count", "pkg_path", "pretend",
3285                 "scheduler", "settings", "tree", "world_atom")
3286
3287         def execute(self):
3288                 root_config = self.pkg.root_config
3289                 settings = self.settings
3290                 retval = portage.merge(settings["CATEGORY"],
3291                         settings["PF"], settings["D"],
3292                         os.path.join(settings["PORTAGE_BUILDDIR"],
3293                         "build-info"), root_config.root, settings,
3294                         myebuild=settings["EBUILD"],
3295                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3296                         vartree=root_config.trees["vartree"],
3297                         prev_mtimes=self.ldpath_mtimes,
3298                         scheduler=self.scheduler,
3299                         blockers=self.find_blockers)
3300
3301                 if retval == os.EX_OK:
3302                         self.world_atom(self.pkg)
3303                         self._log_success()
3304
3305                 return retval
3306
3307         def _log_success(self):
3308                 pkg = self.pkg
3309                 pkg_count = self.pkg_count
3310                 pkg_path = self.pkg_path
3311                 logger = self.logger
3312                 if "noclean" not in self.settings.features:
3313                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3314                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3315                         logger.log((" === (%s of %s) " + \
3316                                 "Post-Build Cleaning (%s::%s)") % \
3317                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3318                                 short_msg=short_msg)
3319                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3320                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3321
3322 class PackageUninstall(AsynchronousTask):
3323
3324         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3325
3326         def _start(self):
3327                 try:
3328                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3329                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3330                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3331                                 writemsg_level=self._writemsg_level)
3332                 except UninstallFailure, e:
3333                         self.returncode = e.status
3334                 else:
3335                         self.returncode = os.EX_OK
3336                 self.wait()
3337
3338         def _writemsg_level(self, msg, level=0, noiselevel=0):
3339
3340                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3341                 background = self.background
3342
3343                 if log_path is None:
3344                         if not (background and level < logging.WARNING):
3345                                 portage.util.writemsg_level(msg,
3346                                         level=level, noiselevel=noiselevel)
3347                 else:
3348                         if not background:
3349                                 portage.util.writemsg_level(msg,
3350                                         level=level, noiselevel=noiselevel)
3351
3352                         f = open(log_path, 'a')
3353                         try:
3354                                 f.write(msg)
3355                         finally:
3356                                 f.close()
3357
3358 class Binpkg(CompositeTask):
3359
3360         __slots__ = ("find_blockers",
3361                 "ldpath_mtimes", "logger", "opts",
3362                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3363                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3364                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3365
3366         def _writemsg_level(self, msg, level=0, noiselevel=0):
3367
3368                 if not self.background:
3369                         portage.util.writemsg_level(msg,
3370                                 level=level, noiselevel=noiselevel)
3371
3372                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3373                 if  log_path is not None:
3374                         f = open(log_path, 'a')
3375                         try:
3376                                 f.write(msg)
3377                         finally:
3378                                 f.close()
3379
3380         def _start(self):
3381
3382                 pkg = self.pkg
3383                 settings = self.settings
3384                 settings.setcpv(pkg)
3385                 self._tree = "bintree"
3386                 self._bintree = self.pkg.root_config.trees[self._tree]
3387                 self._verify = not self.opts.pretend
3388
3389                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3390                         "portage", pkg.category, pkg.pf)
3391                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3392                         pkg=pkg, settings=settings)
3393                 self._image_dir = os.path.join(dir_path, "image")
3394                 self._infloc = os.path.join(dir_path, "build-info")
3395                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3396                 settings["EBUILD"] = self._ebuild_path
3397                 debug = settings.get("PORTAGE_DEBUG") == "1"
3398                 portage.doebuild_environment(self._ebuild_path, "setup",
3399                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3400                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3401
3402                 # The prefetcher has already completed or it
3403                 # could be running now. If it's running now,
3404                 # wait for it to complete since it holds
3405                 # a lock on the file being fetched. The
3406                 # portage.locks functions are only designed
3407                 # to work between separate processes. Since
3408                 # the lock is held by the current process,
3409                 # use the scheduler and fetcher methods to
3410                 # synchronize with the fetcher.
3411                 prefetcher = self.prefetcher
3412                 if prefetcher is None:
3413                         pass
3414                 elif not prefetcher.isAlive():
3415                         prefetcher.cancel()
3416                 elif prefetcher.poll() is None:
3417
3418                         waiting_msg = ("Fetching '%s' " + \
3419                                 "in the background. " + \
3420                                 "To view fetch progress, run `tail -f " + \
3421                                 "/var/log/emerge-fetch.log` in another " + \
3422                                 "terminal.") % prefetcher.pkg_path
3423                         msg_prefix = colorize("GOOD", " * ")
3424                         from textwrap import wrap
3425                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3426                                 for line in wrap(waiting_msg, 65))
3427                         if not self.background:
3428                                 writemsg(waiting_msg, noiselevel=-1)
3429
3430                         self._current_task = prefetcher
3431                         prefetcher.addExitListener(self._prefetch_exit)
3432                         return
3433
3434                 self._prefetch_exit(prefetcher)
3435
3436         def _prefetch_exit(self, prefetcher):
3437
3438                 pkg = self.pkg
3439                 pkg_count = self.pkg_count
3440                 if not (self.opts.pretend or self.opts.fetchonly):
3441                         self._build_dir.lock()
3442                         # If necessary, discard old log so that we don't
3443                         # append to it.
3444                         self._build_dir.clean_log()
3445                         # Initialze PORTAGE_LOG_FILE.
3446                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3447                 fetcher = BinpkgFetcher(background=self.background,
3448                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3449                         pretend=self.opts.pretend, scheduler=self.scheduler)
3450                 pkg_path = fetcher.pkg_path
3451                 self._pkg_path = pkg_path
3452
3453                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3454
3455                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3456                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3457                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3458                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3459                         self.logger.log(msg, short_msg=short_msg)
3460                         self._start_task(fetcher, self._fetcher_exit)
3461                         return
3462
3463                 self._fetcher_exit(fetcher)
3464
3465         def _fetcher_exit(self, fetcher):
3466
3467                 # The fetcher only has a returncode when
3468                 # --getbinpkg is enabled.
3469                 if fetcher.returncode is not None:
3470                         self._fetched_pkg = True
3471                         if self._default_exit(fetcher) != os.EX_OK:
3472                                 self._unlock_builddir()
3473                                 self.wait()
3474                                 return
3475
3476                 if self.opts.pretend:
3477                         self._current_task = None
3478                         self.returncode = os.EX_OK
3479                         self.wait()
3480                         return
3481
3482                 verifier = None
3483                 if self._verify:
3484                         logfile = None
3485                         if self.background:
3486                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3487                         verifier = BinpkgVerifier(background=self.background,
3488                                 logfile=logfile, pkg=self.pkg)
3489                         self._start_task(verifier, self._verifier_exit)
3490                         return
3491
3492                 self._verifier_exit(verifier)
3493
3494         def _verifier_exit(self, verifier):
3495                 if verifier is not None and \
3496                         self._default_exit(verifier) != os.EX_OK:
3497                         self._unlock_builddir()
3498                         self.wait()
3499                         return
3500
3501                 logger = self.logger
3502                 pkg = self.pkg
3503                 pkg_count = self.pkg_count
3504                 pkg_path = self._pkg_path
3505
3506                 if self._fetched_pkg:
3507                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3508
3509                 if self.opts.fetchonly:
3510                         self._current_task = None
3511                         self.returncode = os.EX_OK
3512                         self.wait()
3513                         return
3514
3515                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3516                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3517                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3518                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3519                 logger.log(msg, short_msg=short_msg)
3520
3521                 phase = "clean"
3522                 settings = self.settings
3523                 ebuild_phase = EbuildPhase(background=self.background,
3524                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3525                         settings=settings, tree=self._tree)
3526
3527                 self._start_task(ebuild_phase, self._clean_exit)
3528
3529         def _clean_exit(self, clean_phase):
3530                 if self._default_exit(clean_phase) != os.EX_OK:
3531                         self._unlock_builddir()
3532                         self.wait()
3533                         return
3534
3535                 dir_path = self._build_dir.dir_path
3536
3537                 infloc = self._infloc
3538                 pkg = self.pkg
3539                 pkg_path = self._pkg_path
3540
3541                 dir_mode = 0755
3542                 for mydir in (dir_path, self._image_dir, infloc):
3543                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3544                                 gid=portage.data.portage_gid, mode=dir_mode)
3545
3546                 # This initializes PORTAGE_LOG_FILE.
3547                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3548                 self._writemsg_level(">>> Extracting info\n")
3549
3550                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3551                 check_missing_metadata = ("CATEGORY", "PF")
3552                 missing_metadata = set()
3553                 for k in check_missing_metadata:
3554                         v = pkg_xpak.getfile(k)
3555                         if not v:
3556                                 missing_metadata.add(k)
3557
3558                 pkg_xpak.unpackinfo(infloc)
3559                 for k in missing_metadata:
3560                         if k == "CATEGORY":
3561                                 v = pkg.category
3562                         elif k == "PF":
3563                                 v = pkg.pf
3564                         else:
3565                                 continue
3566
3567                         f = open(os.path.join(infloc, k), 'wb')
3568                         try:
3569                                 f.write(v + "\n")
3570                         finally:
3571                                 f.close()
3572
3573                 # Store the md5sum in the vdb.
3574                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3575                 try:
3576                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3577                 finally:
3578                         f.close()
3579
3580                 # This gives bashrc users an opportunity to do various things
3581                 # such as remove binary packages after they're installed.
3582                 settings = self.settings
3583                 settings.setcpv(self.pkg)
3584                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3585                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3586
3587                 phase = "setup"
3588                 setup_phase = EbuildPhase(background=self.background,
3589                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3590                         settings=settings, tree=self._tree)
3591
3592                 setup_phase.addExitListener(self._setup_exit)
3593                 self._current_task = setup_phase
3594                 self.scheduler.scheduleSetup(setup_phase)
3595
3596         def _setup_exit(self, setup_phase):
3597                 if self._default_exit(setup_phase) != os.EX_OK:
3598                         self._unlock_builddir()
3599                         self.wait()
3600                         return
3601
3602                 extractor = BinpkgExtractorAsync(background=self.background,
3603                         image_dir=self._image_dir,
3604                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3605                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3606                 self._start_task(extractor, self._extractor_exit)
3607
3608         def _extractor_exit(self, extractor):
3609                 if self._final_exit(extractor) != os.EX_OK:
3610                         self._unlock_builddir()
3611                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3612                                 noiselevel=-1)
3613                 self.wait()
3614
3615         def _unlock_builddir(self):
3616                 if self.opts.pretend or self.opts.fetchonly:
3617                         return
3618                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3619                 self._build_dir.unlock()
3620
3621         def install(self):
3622
3623                 # This gives bashrc users an opportunity to do various things
3624                 # such as remove binary packages after they're installed.
3625                 settings = self.settings
3626                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3627                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3628
3629                 merge = EbuildMerge(find_blockers=self.find_blockers,
3630                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3631                         pkg=self.pkg, pkg_count=self.pkg_count,
3632                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3633                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3634
3635                 try:
3636                         retval = merge.execute()
3637                 finally:
3638                         settings.pop("PORTAGE_BINPKG_FILE", None)
3639                         self._unlock_builddir()
3640                 return retval
3641
3642 class BinpkgFetcher(SpawnProcess):
3643
3644         __slots__ = ("pkg", "pretend",
3645                 "locked", "pkg_path", "_lock_obj")
3646
3647         def __init__(self, **kwargs):
3648                 SpawnProcess.__init__(self, **kwargs)
3649                 pkg = self.pkg
3650                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3651
3652         def _start(self):
3653
3654                 if self.cancelled:
3655                         return
3656
3657                 pkg = self.pkg
3658                 pretend = self.pretend
3659                 bintree = pkg.root_config.trees["bintree"]
3660                 settings = bintree.settings
3661                 use_locks = "distlocks" in settings.features
3662                 pkg_path = self.pkg_path
3663
3664                 if not pretend:
3665                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3666                         if use_locks:
3667                                 self.lock()
3668                 exists = os.path.exists(pkg_path)
3669                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3670                 if not (pretend or resume):
3671                         # Remove existing file or broken symlink.
3672                         try:
3673                                 os.unlink(pkg_path)
3674                         except OSError:
3675                                 pass
3676
3677                 # urljoin doesn't work correctly with
3678                 # unrecognized protocols like sftp
3679                 if bintree._remote_has_index:
3680                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3681                         if not rel_uri:
3682                                 rel_uri = pkg.cpv + ".tbz2"
3683                         uri = bintree._remote_base_uri.rstrip("/") + \
3684                                 "/" + rel_uri.lstrip("/")
3685                 else:
3686                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3687                                 "/" + pkg.pf + ".tbz2"
3688
3689                 if pretend:
3690                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3691                         self.returncode = os.EX_OK
3692                         self.wait()
3693                         return
3694
3695                 protocol = urlparse.urlparse(uri)[0]
3696                 fcmd_prefix = "FETCHCOMMAND"
3697                 if resume:
3698                         fcmd_prefix = "RESUMECOMMAND"
3699                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3700                 if not fcmd:
3701                         fcmd = settings.get(fcmd_prefix)
3702
3703                 fcmd_vars = {
3704                         "DISTDIR" : os.path.dirname(pkg_path),
3705                         "URI"     : uri,
3706                         "FILE"    : os.path.basename(pkg_path)
3707                 }
3708
3709                 fetch_env = dict(settings.iteritems())
3710                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3711                         for x in shlex.split(fcmd)]
3712
3713                 if self.fd_pipes is None:
3714                         self.fd_pipes = {}
3715                 fd_pipes = self.fd_pipes
3716
3717                 # Redirect all output to stdout since some fetchers like
3718                 # wget pollute stderr (if portage detects a problem then it
3719                 # can send it's own message to stderr).
3720                 fd_pipes.setdefault(0, sys.stdin.fileno())
3721                 fd_pipes.setdefault(1, sys.stdout.fileno())
3722                 fd_pipes.setdefault(2, sys.stdout.fileno())
3723
3724                 self.args = fetch_args
3725                 self.env = fetch_env
3726                 SpawnProcess._start(self)
3727
3728         def _set_returncode(self, wait_retval):
3729                 SpawnProcess._set_returncode(self, wait_retval)
3730                 if self.returncode == os.EX_OK:
3731                         # If possible, update the mtime to match the remote package if
3732                         # the fetcher didn't already do it automatically.
3733                         bintree = self.pkg.root_config.trees["bintree"]
3734                         if bintree._remote_has_index:
3735                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3736                                 if remote_mtime is not None:
3737                                         try:
3738                                                 remote_mtime = long(remote_mtime)
3739                                         except ValueError:
3740                                                 pass
3741                                         else:
3742                                                 try:
3743                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3744                                                 except OSError:
3745                                                         pass
3746                                                 else:
3747                                                         if remote_mtime != local_mtime:
3748                                                                 try:
3749                                                                         os.utime(self.pkg_path,
3750                                                                                 (remote_mtime, remote_mtime))
3751                                                                 except OSError:
3752                                                                         pass
3753
3754                 if self.locked:
3755                         self.unlock()
3756
3757         def lock(self):
3758                 """
3759                 This raises an AlreadyLocked exception if lock() is called
3760                 while a lock is already held. In order to avoid this, call
3761                 unlock() or check whether the "locked" attribute is True
3762                 or False before calling lock().
3763                 """
3764                 if self._lock_obj is not None:
3765                         raise self.AlreadyLocked((self._lock_obj,))
3766
3767                 self._lock_obj = portage.locks.lockfile(
3768                         self.pkg_path, wantnewlockfile=1)
3769                 self.locked = True
3770
3771         class AlreadyLocked(portage.exception.PortageException):
3772                 pass
3773
3774         def unlock(self):
3775                 if self._lock_obj is None:
3776                         return
3777                 portage.locks.unlockfile(self._lock_obj)
3778                 self._lock_obj = None
3779                 self.locked = False
3780
3781 class BinpkgVerifier(AsynchronousTask):
3782         __slots__ = ("logfile", "pkg",)
3783
3784         def _start(self):
3785                 """
3786                 Note: Unlike a normal AsynchronousTask.start() method,
3787                 this one does all work is synchronously. The returncode
3788                 attribute will be set before it returns.
3789                 """
3790
3791                 pkg = self.pkg
3792                 root_config = pkg.root_config
3793                 bintree = root_config.trees["bintree"]
3794                 rval = os.EX_OK
3795                 stdout_orig = sys.stdout
3796                 stderr_orig = sys.stderr
3797                 log_file = None
3798                 if self.background and self.logfile is not None:
3799                         log_file = open(self.logfile, 'a')
3800                 try:
3801                         if log_file is not None:
3802                                 sys.stdout = log_file
3803                                 sys.stderr = log_file
3804                         try:
3805                                 bintree.digestCheck(pkg)
3806                         except portage.exception.FileNotFound:
3807                                 writemsg("!!! Fetching Binary failed " + \
3808                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3809                                 rval = 1
3810                         except portage.exception.DigestException, e:
3811                                 writemsg("\n!!! Digest verification failed:\n",
3812                                         noiselevel=-1)
3813                                 writemsg("!!! %s\n" % e.value[0],
3814                                         noiselevel=-1)
3815                                 writemsg("!!! Reason: %s\n" % e.value[1],
3816                                         noiselevel=-1)
3817                                 writemsg("!!! Got: %s\n" % e.value[2],
3818                                         noiselevel=-1)
3819                                 writemsg("!!! Expected: %s\n" % e.value[3],
3820                                         noiselevel=-1)
3821                                 rval = 1
3822                         if rval != os.EX_OK:
3823                                 pkg_path = bintree.getname(pkg.cpv)
3824                                 head, tail = os.path.split(pkg_path)
3825                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3826                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3827                                         noiselevel=-1)
3828                 finally:
3829                         sys.stdout = stdout_orig
3830                         sys.stderr = stderr_orig
3831                         if log_file is not None:
3832                                 log_file.close()
3833
3834                 self.returncode = rval
3835                 self.wait()
3836
3837 class BinpkgPrefetcher(CompositeTask):
3838
3839         __slots__ = ("pkg",) + \
3840                 ("pkg_path", "_bintree",)
3841
3842         def _start(self):
3843                 self._bintree = self.pkg.root_config.trees["bintree"]
3844                 fetcher = BinpkgFetcher(background=self.background,
3845                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3846                         scheduler=self.scheduler)
3847                 self.pkg_path = fetcher.pkg_path
3848                 self._start_task(fetcher, self._fetcher_exit)
3849
3850         def _fetcher_exit(self, fetcher):
3851
3852                 if self._default_exit(fetcher) != os.EX_OK:
3853                         self.wait()
3854                         return
3855
3856                 verifier = BinpkgVerifier(background=self.background,
3857                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3858                 self._start_task(verifier, self._verifier_exit)
3859
3860         def _verifier_exit(self, verifier):
3861                 if self._default_exit(verifier) != os.EX_OK:
3862                         self.wait()
3863                         return
3864
3865                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3866
3867                 self._current_task = None
3868                 self.returncode = os.EX_OK
3869                 self.wait()
3870
3871 class BinpkgExtractorAsync(SpawnProcess):
3872
3873         __slots__ = ("image_dir", "pkg", "pkg_path")
3874
3875         _shell_binary = portage.const.BASH_BINARY
3876
3877         def _start(self):
3878                 self.args = [self._shell_binary, "-c",
3879                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3880                         (portage._shell_quote(self.pkg_path),
3881                         portage._shell_quote(self.image_dir))]
3882
3883                 self.env = self.pkg.root_config.settings.environ()
3884                 SpawnProcess._start(self)
3885
3886 class MergeListItem(CompositeTask):
3887
3888         """
3889         TODO: For parallel scheduling, everything here needs asynchronous
3890         execution support (start, poll, and wait methods).
3891         """
3892
3893         __slots__ = ("args_set",
3894                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3895                 "find_blockers", "logger", "mtimedb", "pkg",
3896                 "pkg_count", "pkg_to_replace", "prefetcher",
3897                 "settings", "statusMessage", "world_atom") + \
3898                 ("_install_task",)
3899
3900         def _start(self):
3901
3902                 pkg = self.pkg
3903                 build_opts = self.build_opts
3904
3905                 if pkg.installed:
3906                         # uninstall,  executed by self.merge()
3907                         self.returncode = os.EX_OK
3908                         self.wait()
3909                         return
3910
3911                 args_set = self.args_set
3912                 find_blockers = self.find_blockers
3913                 logger = self.logger
3914                 mtimedb = self.mtimedb
3915                 pkg_count = self.pkg_count
3916                 scheduler = self.scheduler
3917                 settings = self.settings
3918                 world_atom = self.world_atom
3919                 ldpath_mtimes = mtimedb["ldpath"]
3920
3921                 action_desc = "Emerging"
3922                 preposition = "for"
3923                 if pkg.type_name == "binary":
3924                         action_desc += " binary"
3925
3926                 if build_opts.fetchonly:
3927                         action_desc = "Fetching"
3928
3929                 msg = "%s (%s of %s) %s" % \
3930                         (action_desc,
3931                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3932                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3933                         colorize("GOOD", pkg.cpv))
3934
3935                 portdb = pkg.root_config.trees["porttree"].dbapi
3936                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3937                 if portdir_repo_name:
3938                         pkg_repo_name = pkg.metadata.get("repository")
3939                         if pkg_repo_name != portdir_repo_name:
3940                                 if not pkg_repo_name:
3941                                         pkg_repo_name = "unknown repo"
3942                                 msg += " from %s" % pkg_repo_name
3943
3944                 if pkg.root != "/":
3945                         msg += " %s %s" % (preposition, pkg.root)
3946
3947                 if not build_opts.pretend:
3948                         self.statusMessage(msg)
3949                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3950                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3951
3952                 if pkg.type_name == "ebuild":
3953
3954                         build = EbuildBuild(args_set=args_set,
3955                                 background=self.background,
3956                                 config_pool=self.config_pool,
3957                                 find_blockers=find_blockers,
3958                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3959                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3960                                 prefetcher=self.prefetcher, scheduler=scheduler,
3961                                 settings=settings, world_atom=world_atom)
3962
3963                         self._install_task = build
3964                         self._start_task(build, self._default_final_exit)
3965                         return
3966
3967                 elif pkg.type_name == "binary":
3968
3969                         binpkg = Binpkg(background=self.background,
3970                                 find_blockers=find_blockers,
3971                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3972                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3973                                 prefetcher=self.prefetcher, settings=settings,
3974                                 scheduler=scheduler, world_atom=world_atom)
3975
3976                         self._install_task = binpkg
3977                         self._start_task(binpkg, self._default_final_exit)
3978                         return
3979
3980         def _poll(self):
3981                 self._install_task.poll()
3982                 return self.returncode
3983
3984         def _wait(self):
3985                 self._install_task.wait()
3986                 return self.returncode
3987
3988         def merge(self):
3989
3990                 pkg = self.pkg
3991                 build_opts = self.build_opts
3992                 find_blockers = self.find_blockers
3993                 logger = self.logger
3994                 mtimedb = self.mtimedb
3995                 pkg_count = self.pkg_count
3996                 prefetcher = self.prefetcher
3997                 scheduler = self.scheduler
3998                 settings = self.settings
3999                 world_atom = self.world_atom
4000                 ldpath_mtimes = mtimedb["ldpath"]
4001
4002                 if pkg.installed:
4003                         if not (build_opts.buildpkgonly or \
4004                                 build_opts.fetchonly or build_opts.pretend):
4005
4006                                 uninstall = PackageUninstall(background=self.background,
4007                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4008                                         pkg=pkg, scheduler=scheduler, settings=settings)
4009
4010                                 uninstall.start()
4011                                 retval = uninstall.wait()
4012                                 if retval != os.EX_OK:
4013                                         return retval
4014                         return os.EX_OK
4015
4016                 if build_opts.fetchonly or \
4017                         build_opts.buildpkgonly:
4018                         return self.returncode
4019
4020                 retval = self._install_task.install()
4021                 return retval
4022
4023 class PackageMerge(AsynchronousTask):
4024         """
4025         TODO: Implement asynchronous merge so that the scheduler can
4026         run while a merge is executing.
4027         """
4028
4029         __slots__ = ("merge",)
4030
4031         def _start(self):
4032
4033                 pkg = self.merge.pkg
4034                 pkg_count = self.merge.pkg_count
4035
4036                 if pkg.installed:
4037                         action_desc = "Uninstalling"
4038                         preposition = "from"
4039                         counter_str = ""
4040                 else:
4041                         action_desc = "Installing"
4042                         preposition = "to"
4043                         counter_str = "(%s of %s) " % \
4044                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4045                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4046
4047                 msg = "%s %s%s" % \
4048                         (action_desc,
4049                         counter_str,
4050                         colorize("GOOD", pkg.cpv))
4051
4052                 if pkg.root != "/":
4053                         msg += " %s %s" % (preposition, pkg.root)
4054
4055                 if not self.merge.build_opts.fetchonly and \
4056                         not self.merge.build_opts.pretend and \
4057                         not self.merge.build_opts.buildpkgonly:
4058                         self.merge.statusMessage(msg)
4059
4060                 self.returncode = self.merge.merge()
4061                 self.wait()
4062
4063 class DependencyArg(object):
4064         def __init__(self, arg=None, root_config=None):
4065                 self.arg = arg
4066                 self.root_config = root_config
4067
4068         def __str__(self):
4069                 return str(self.arg)
4070
4071 class AtomArg(DependencyArg):
4072         def __init__(self, atom=None, **kwargs):
4073                 DependencyArg.__init__(self, **kwargs)
4074                 self.atom = atom
4075                 if not isinstance(self.atom, portage.dep.Atom):
4076                         self.atom = portage.dep.Atom(self.atom)
4077                 self.set = (self.atom, )
4078
4079 class PackageArg(DependencyArg):
4080         def __init__(self, package=None, **kwargs):
4081                 DependencyArg.__init__(self, **kwargs)
4082                 self.package = package
4083                 self.atom = portage.dep.Atom("=" + package.cpv)
4084                 self.set = (self.atom, )
4085
4086 class SetArg(DependencyArg):
4087         def __init__(self, set=None, **kwargs):
4088                 DependencyArg.__init__(self, **kwargs)
4089                 self.set = set
4090                 self.name = self.arg[len(SETPREFIX):]
4091
4092 class Dependency(SlotObject):
4093         __slots__ = ("atom", "blocker", "depth",
4094                 "parent", "onlydeps", "priority", "root")
4095         def __init__(self, **kwargs):
4096                 SlotObject.__init__(self, **kwargs)
4097                 if self.priority is None:
4098                         self.priority = DepPriority()
4099                 if self.depth is None:
4100                         self.depth = 0
4101
4102 class BlockerCache(portage.cache.mappings.MutableMapping):
4103         """This caches blockers of installed packages so that dep_check does not
4104         have to be done for every single installed package on every invocation of
4105         emerge.  The cache is invalidated whenever it is detected that something
4106         has changed that might alter the results of dep_check() calls:
4107                 1) the set of installed packages (including COUNTER) has changed
4108                 2) the old-style virtuals have changed
4109         """
4110
4111         # Number of uncached packages to trigger cache update, since
4112         # it's wasteful to update it for every vdb change.
4113         _cache_threshold = 5
4114
4115         class BlockerData(object):
4116
4117                 __slots__ = ("__weakref__", "atoms", "counter")
4118
4119                 def __init__(self, counter, atoms):
4120                         self.counter = counter
4121                         self.atoms = atoms
4122
4123         def __init__(self, myroot, vardb):
4124                 self._vardb = vardb
4125                 self._virtuals = vardb.settings.getvirtuals()
4126                 self._cache_filename = os.path.join(myroot,
4127                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4128                 self._cache_version = "1"
4129                 self._cache_data = None
4130                 self._modified = set()
4131                 self._load()
4132
4133         def _load(self):
4134                 try:
4135                         f = open(self._cache_filename, mode='rb')
4136                         mypickle = pickle.Unpickler(f)
4137                         try:
4138                                 mypickle.find_global = None
4139                         except AttributeError:
4140                                 # TODO: If py3k, override Unpickler.find_class().
4141                                 pass
4142                         self._cache_data = mypickle.load()
4143                         f.close()
4144                         del f
4145                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4146                         if isinstance(e, pickle.UnpicklingError):
4147                                 writemsg("!!! Error loading '%s': %s\n" % \
4148                                         (self._cache_filename, str(e)), noiselevel=-1)
4149                         del e
4150
4151                 cache_valid = self._cache_data and \
4152                         isinstance(self._cache_data, dict) and \
4153                         self._cache_data.get("version") == self._cache_version and \
4154                         isinstance(self._cache_data.get("blockers"), dict)
4155                 if cache_valid:
4156                         # Validate all the atoms and counters so that
4157                         # corruption is detected as soon as possible.
4158                         invalid_items = set()
4159                         for k, v in self._cache_data["blockers"].iteritems():
4160                                 if not isinstance(k, basestring):
4161                                         invalid_items.add(k)
4162                                         continue
4163                                 try:
4164                                         if portage.catpkgsplit(k) is None:
4165                                                 invalid_items.add(k)
4166                                                 continue
4167                                 except portage.exception.InvalidData:
4168                                         invalid_items.add(k)
4169                                         continue
4170                                 if not isinstance(v, tuple) or \
4171                                         len(v) != 2:
4172                                         invalid_items.add(k)
4173                                         continue
4174                                 counter, atoms = v
4175                                 if not isinstance(counter, (int, long)):
4176                                         invalid_items.add(k)
4177                                         continue
4178                                 if not isinstance(atoms, (list, tuple)):
4179                                         invalid_items.add(k)
4180                                         continue
4181                                 invalid_atom = False
4182                                 for atom in atoms:
4183                                         if not isinstance(atom, basestring):
4184                                                 invalid_atom = True
4185                                                 break
4186                                         if atom[:1] != "!" or \
4187                                                 not portage.isvalidatom(
4188                                                 atom, allow_blockers=True):
4189                                                 invalid_atom = True
4190                                                 break
4191                                 if invalid_atom:
4192                                         invalid_items.add(k)
4193                                         continue
4194
4195                         for k in invalid_items:
4196                                 del self._cache_data["blockers"][k]
4197                         if not self._cache_data["blockers"]:
4198                                 cache_valid = False
4199
4200                 if not cache_valid:
4201                         self._cache_data = {"version":self._cache_version}
4202                         self._cache_data["blockers"] = {}
4203                         self._cache_data["virtuals"] = self._virtuals
4204                 self._modified.clear()
4205
4206         def flush(self):
4207                 """If the current user has permission and the internal blocker cache
4208                 been updated, save it to disk and mark it unmodified.  This is called
4209                 by emerge after it has proccessed blockers for all installed packages.
4210                 Currently, the cache is only written if the user has superuser
4211                 privileges (since that's required to obtain a lock), but all users
4212                 have read access and benefit from faster blocker lookups (as long as
4213                 the entire cache is still valid).  The cache is stored as a pickled
4214                 dict object with the following format:
4215
4216                 {
4217                         version : "1",
4218                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4219                         "virtuals" : vardb.settings.getvirtuals()
4220                 }
4221                 """
4222                 if len(self._modified) >= self._cache_threshold and \
4223                         secpass >= 2:
4224                         try:
4225                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4226                                 pickle.dump(self._cache_data, f, protocol=2)
4227                                 f.close()
4228                                 portage.util.apply_secpass_permissions(
4229                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4230                         except (IOError, OSError), e:
4231                                 pass
4232                         self._modified.clear()
4233
4234         def __setitem__(self, cpv, blocker_data):
4235                 """
4236                 Update the cache and mark it as modified for a future call to
4237                 self.flush().
4238
4239                 @param cpv: Package for which to cache blockers.
4240                 @type cpv: String
4241                 @param blocker_data: An object with counter and atoms attributes.
4242                 @type blocker_data: BlockerData
4243                 """
4244                 self._cache_data["blockers"][cpv] = \
4245                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4246                 self._modified.add(cpv)
4247
4248         def __iter__(self):
4249                 if self._cache_data is None:
4250                         # triggered by python-trace
4251                         return iter([])
4252                 return iter(self._cache_data["blockers"])
4253
4254         def __delitem__(self, cpv):
4255                 del self._cache_data["blockers"][cpv]
4256
4257         def __getitem__(self, cpv):
4258                 """
4259                 @rtype: BlockerData
4260                 @returns: An object with counter and atoms attributes.
4261                 """
4262                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4263
4264 class BlockerDB(object):
4265
4266         def __init__(self, root_config):
4267                 self._root_config = root_config
4268                 self._vartree = root_config.trees["vartree"]
4269                 self._portdb = root_config.trees["porttree"].dbapi
4270
4271                 self._dep_check_trees = None
4272                 self._fake_vartree = None
4273
4274         def _get_fake_vartree(self, acquire_lock=0):
4275                 fake_vartree = self._fake_vartree
4276                 if fake_vartree is None:
4277                         fake_vartree = FakeVartree(self._root_config,
4278                                 acquire_lock=acquire_lock)
4279                         self._fake_vartree = fake_vartree
4280                         self._dep_check_trees = { self._vartree.root : {
4281                                 "porttree"    :  fake_vartree,
4282                                 "vartree"     :  fake_vartree,
4283                         }}
4284                 else:
4285                         fake_vartree.sync(acquire_lock=acquire_lock)
4286                 return fake_vartree
4287
4288         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4289                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4290                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4291                 settings = self._vartree.settings
4292                 stale_cache = set(blocker_cache)
4293                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4294                 dep_check_trees = self._dep_check_trees
4295                 vardb = fake_vartree.dbapi
4296                 installed_pkgs = list(vardb)
4297
4298                 for inst_pkg in installed_pkgs:
4299                         stale_cache.discard(inst_pkg.cpv)
4300                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4301                         if cached_blockers is not None and \
4302                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4303                                 cached_blockers = None
4304                         if cached_blockers is not None:
4305                                 blocker_atoms = cached_blockers.atoms
4306                         else:
4307                                 # Use aux_get() to trigger FakeVartree global
4308                                 # updates on *DEPEND when appropriate.
4309                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4310                                 try:
4311                                         portage.dep._dep_check_strict = False
4312                                         success, atoms = portage.dep_check(depstr,
4313                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4314                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4315                                 finally:
4316                                         portage.dep._dep_check_strict = True
4317                                 if not success:
4318                                         pkg_location = os.path.join(inst_pkg.root,
4319                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4320                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4321                                                 (pkg_location, atoms), noiselevel=-1)
4322                                         continue
4323
4324                                 blocker_atoms = [atom for atom in atoms \
4325                                         if atom.startswith("!")]
4326                                 blocker_atoms.sort()
4327                                 counter = long(inst_pkg.metadata["COUNTER"])
4328                                 blocker_cache[inst_pkg.cpv] = \
4329                                         blocker_cache.BlockerData(counter, blocker_atoms)
4330                 for cpv in stale_cache:
4331                         del blocker_cache[cpv]
4332                 blocker_cache.flush()
4333
4334                 blocker_parents = digraph()
4335                 blocker_atoms = []
4336                 for pkg in installed_pkgs:
4337                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4338                                 blocker_atom = blocker_atom.lstrip("!")
4339                                 blocker_atoms.append(blocker_atom)
4340                                 blocker_parents.add(blocker_atom, pkg)
4341
4342                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4343                 blocking_pkgs = set()
4344                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4345                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4346
4347                 # Check for blockers in the other direction.
4348                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4349                 try:
4350                         portage.dep._dep_check_strict = False
4351                         success, atoms = portage.dep_check(depstr,
4352                                 vardb, settings, myuse=new_pkg.use.enabled,
4353                                 trees=dep_check_trees, myroot=new_pkg.root)
4354                 finally:
4355                         portage.dep._dep_check_strict = True
4356                 if not success:
4357                         # We should never get this far with invalid deps.
4358                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4359                         assert False
4360
4361                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4362                         if atom[:1] == "!"]
4363                 if blocker_atoms:
4364                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4365                         for inst_pkg in installed_pkgs:
4366                                 try:
4367                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4368                                 except (portage.exception.InvalidDependString, StopIteration):
4369                                         continue
4370                                 blocking_pkgs.add(inst_pkg)
4371
4372                 return blocking_pkgs
4373
4374 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4375
4376         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4377                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4378         p_type, p_root, p_key, p_status = parent_node
4379         msg = []
4380         if p_status == "nomerge":
4381                 category, pf = portage.catsplit(p_key)
4382                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4383                 msg.append("Portage is unable to process the dependencies of the ")
4384                 msg.append("'%s' package. " % p_key)
4385                 msg.append("In order to correct this problem, the package ")
4386                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4387                 msg.append("As a temporary workaround, the --nodeps option can ")
4388                 msg.append("be used to ignore all dependencies.  For reference, ")
4389                 msg.append("the problematic dependencies can be found in the ")
4390                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4391         else:
4392                 msg.append("This package can not be installed. ")
4393                 msg.append("Please notify the '%s' package maintainer " % p_key)
4394                 msg.append("about this problem.")
4395
4396         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4397         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4398
4399 class PackageVirtualDbapi(portage.dbapi):
4400         """
4401         A dbapi-like interface class that represents the state of the installed
4402         package database as new packages are installed, replacing any packages
4403         that previously existed in the same slot. The main difference between
4404         this class and fakedbapi is that this one uses Package instances
4405         internally (passed in via cpv_inject() and cpv_remove() calls).
4406         """
4407         def __init__(self, settings):
4408                 portage.dbapi.__init__(self)
4409                 self.settings = settings
4410                 self._match_cache = {}
4411                 self._cp_map = {}
4412                 self._cpv_map = {}
4413
4414         def clear(self):
4415                 """
4416                 Remove all packages.
4417                 """
4418                 if self._cpv_map:
4419                         self._clear_cache()
4420                         self._cp_map.clear()
4421                         self._cpv_map.clear()
4422
4423         def copy(self):
4424                 obj = PackageVirtualDbapi(self.settings)
4425                 obj._match_cache = self._match_cache.copy()
4426                 obj._cp_map = self._cp_map.copy()
4427                 for k, v in obj._cp_map.iteritems():
4428                         obj._cp_map[k] = v[:]
4429                 obj._cpv_map = self._cpv_map.copy()
4430                 return obj
4431
4432         def __iter__(self):
4433                 return self._cpv_map.itervalues()
4434
4435         def __contains__(self, item):
4436                 existing = self._cpv_map.get(item.cpv)
4437                 if existing is not None and \
4438                         existing == item:
4439                         return True
4440                 return False
4441
4442         def get(self, item, default=None):
4443                 cpv = getattr(item, "cpv", None)
4444                 if cpv is None:
4445                         if len(item) != 4:
4446                                 return default
4447                         type_name, root, cpv, operation = item
4448
4449                 existing = self._cpv_map.get(cpv)
4450                 if existing is not None and \
4451                         existing == item:
4452                         return existing
4453                 return default
4454
4455         def match_pkgs(self, atom):
4456                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4457
4458         def _clear_cache(self):
4459                 if self._categories is not None:
4460                         self._categories = None
4461                 if self._match_cache:
4462                         self._match_cache = {}
4463
4464         def match(self, origdep, use_cache=1):
4465                 result = self._match_cache.get(origdep)
4466                 if result is not None:
4467                         return result[:]
4468                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4469                 self._match_cache[origdep] = result
4470                 return result[:]
4471
4472         def cpv_exists(self, cpv):
4473                 return cpv in self._cpv_map
4474
4475         def cp_list(self, mycp, use_cache=1):
4476                 cachelist = self._match_cache.get(mycp)
4477                 # cp_list() doesn't expand old-style virtuals
4478                 if cachelist and cachelist[0].startswith(mycp):
4479                         return cachelist[:]
4480                 cpv_list = self._cp_map.get(mycp)
4481                 if cpv_list is None:
4482                         cpv_list = []
4483                 else:
4484                         cpv_list = [pkg.cpv for pkg in cpv_list]
4485                 self._cpv_sort_ascending(cpv_list)
4486                 if not (not cpv_list and mycp.startswith("virtual/")):
4487                         self._match_cache[mycp] = cpv_list
4488                 return cpv_list[:]
4489
4490         def cp_all(self):
4491                 return list(self._cp_map)
4492
4493         def cpv_all(self):
4494                 return list(self._cpv_map)
4495
4496         def cpv_inject(self, pkg):
4497                 cp_list = self._cp_map.get(pkg.cp)
4498                 if cp_list is None:
4499                         cp_list = []
4500                         self._cp_map[pkg.cp] = cp_list
4501                 e_pkg = self._cpv_map.get(pkg.cpv)
4502                 if e_pkg is not None:
4503                         if e_pkg == pkg:
4504                                 return
4505                         self.cpv_remove(e_pkg)
4506                 for e_pkg in cp_list:
4507                         if e_pkg.slot_atom == pkg.slot_atom:
4508                                 if e_pkg == pkg:
4509                                         return
4510                                 self.cpv_remove(e_pkg)
4511                                 break
4512                 cp_list.append(pkg)
4513                 self._cpv_map[pkg.cpv] = pkg
4514                 self._clear_cache()
4515
4516         def cpv_remove(self, pkg):
4517                 old_pkg = self._cpv_map.get(pkg.cpv)
4518                 if old_pkg != pkg:
4519                         raise KeyError(pkg)
4520                 self._cp_map[pkg.cp].remove(pkg)
4521                 del self._cpv_map[pkg.cpv]
4522                 self._clear_cache()
4523
4524         def aux_get(self, cpv, wants):
4525                 metadata = self._cpv_map[cpv].metadata
4526                 return [metadata.get(x, "") for x in wants]
4527
4528         def aux_update(self, cpv, values):
4529                 self._cpv_map[cpv].metadata.update(values)
4530                 self._clear_cache()
4531
4532 class depgraph(object):
4533
4534         pkg_tree_map = RootConfig.pkg_tree_map
4535
4536         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4537
4538         def __init__(self, settings, trees, myopts, myparams, spinner):
4539                 self.settings = settings
4540                 self.target_root = settings["ROOT"]
4541                 self.myopts = myopts
4542                 self.myparams = myparams
4543                 self.edebug = 0
4544                 if settings.get("PORTAGE_DEBUG", "") == "1":
4545                         self.edebug = 1
4546                 self.spinner = spinner
4547                 self._running_root = trees["/"]["root_config"]
4548                 self._opts_no_restart = Scheduler._opts_no_restart
4549                 self.pkgsettings = {}
4550                 # Maps slot atom to package for each Package added to the graph.
4551                 self._slot_pkg_map = {}
4552                 # Maps nodes to the reasons they were selected for reinstallation.
4553                 self._reinstall_nodes = {}
4554                 self.mydbapi = {}
4555                 self.trees = {}
4556                 self._trees_orig = trees
4557                 self.roots = {}
4558                 # Contains a filtered view of preferred packages that are selected
4559                 # from available repositories.
4560                 self._filtered_trees = {}
4561                 # Contains installed packages and new packages that have been added
4562                 # to the graph.
4563                 self._graph_trees = {}
4564                 # All Package instances
4565                 self._pkg_cache = {}
4566                 for myroot in trees:
4567                         self.trees[myroot] = {}
4568                         # Create a RootConfig instance that references
4569                         # the FakeVartree instead of the real one.
4570                         self.roots[myroot] = RootConfig(
4571                                 trees[myroot]["vartree"].settings,
4572                                 self.trees[myroot],
4573                                 trees[myroot]["root_config"].setconfig)
4574                         for tree in ("porttree", "bintree"):
4575                                 self.trees[myroot][tree] = trees[myroot][tree]
4576                         self.trees[myroot]["vartree"] = \
4577                                 FakeVartree(trees[myroot]["root_config"],
4578                                         pkg_cache=self._pkg_cache)
4579                         self.pkgsettings[myroot] = portage.config(
4580                                 clone=self.trees[myroot]["vartree"].settings)
4581                         self._slot_pkg_map[myroot] = {}
4582                         vardb = self.trees[myroot]["vartree"].dbapi
4583                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4584                                 "--buildpkgonly" not in self.myopts
4585                         # This fakedbapi instance will model the state that the vdb will
4586                         # have after new packages have been installed.
4587                         fakedb = PackageVirtualDbapi(vardb.settings)
4588                         if preload_installed_pkgs:
4589                                 for pkg in vardb:
4590                                         self.spinner.update()
4591                                         # This triggers metadata updates via FakeVartree.
4592                                         vardb.aux_get(pkg.cpv, [])
4593                                         fakedb.cpv_inject(pkg)
4594
4595                         # Now that the vardb state is cached in our FakeVartree,
4596                         # we won't be needing the real vartree cache for awhile.
4597                         # To make some room on the heap, clear the vardbapi
4598                         # caches.
4599                         trees[myroot]["vartree"].dbapi._clear_cache()
4600                         gc.collect()
4601
4602                         self.mydbapi[myroot] = fakedb
4603                         def graph_tree():
4604                                 pass
4605                         graph_tree.dbapi = fakedb
4606                         self._graph_trees[myroot] = {}
4607                         self._filtered_trees[myroot] = {}
4608                         # Substitute the graph tree for the vartree in dep_check() since we
4609                         # want atom selections to be consistent with package selections
4610                         # have already been made.
4611                         self._graph_trees[myroot]["porttree"]   = graph_tree
4612                         self._graph_trees[myroot]["vartree"]    = graph_tree
4613                         def filtered_tree():
4614                                 pass
4615                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4616                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4617
4618                         # Passing in graph_tree as the vartree here could lead to better
4619                         # atom selections in some cases by causing atoms for packages that
4620                         # have been added to the graph to be preferred over other choices.
4621                         # However, it can trigger atom selections that result in
4622                         # unresolvable direct circular dependencies. For example, this
4623                         # happens with gwydion-dylan which depends on either itself or
4624                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4625                         # gwydion-dylan-bin needs to be selected in order to avoid a
4626                         # an unresolvable direct circular dependency.
4627                         #
4628                         # To solve the problem described above, pass in "graph_db" so that
4629                         # packages that have been added to the graph are distinguishable
4630                         # from other available packages and installed packages. Also, pass
4631                         # the parent package into self._select_atoms() calls so that
4632                         # unresolvable direct circular dependencies can be detected and
4633                         # avoided when possible.
4634                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4635                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4636
4637                         dbs = []
4638                         portdb = self.trees[myroot]["porttree"].dbapi
4639                         bindb  = self.trees[myroot]["bintree"].dbapi
4640                         vardb  = self.trees[myroot]["vartree"].dbapi
4641                         #               (db, pkg_type, built, installed, db_keys)
4642                         if "--usepkgonly" not in self.myopts:
4643                                 db_keys = list(portdb._aux_cache_keys)
4644                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4645                         if "--usepkg" in self.myopts:
4646                                 db_keys = list(bindb._aux_cache_keys)
4647                                 dbs.append((bindb,  "binary", True, False, db_keys))
4648                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4649                         dbs.append((vardb, "installed", True, True, db_keys))
4650                         self._filtered_trees[myroot]["dbs"] = dbs
4651                         if "--usepkg" in self.myopts:
4652                                 self.trees[myroot]["bintree"].populate(
4653                                         "--getbinpkg" in self.myopts,
4654                                         "--getbinpkgonly" in self.myopts)
4655                 del trees
4656
4657                 self.digraph=portage.digraph()
4658                 # contains all sets added to the graph
4659                 self._sets = {}
4660                 # contains atoms given as arguments
4661                 self._sets["args"] = InternalPackageSet()
4662                 # contains all atoms from all sets added to the graph, including
4663                 # atoms given as arguments
4664                 self._set_atoms = InternalPackageSet()
4665                 self._atom_arg_map = {}
4666                 # contains all nodes pulled in by self._set_atoms
4667                 self._set_nodes = set()
4668                 # Contains only Blocker -> Uninstall edges
4669                 self._blocker_uninstalls = digraph()
4670                 # Contains only Package -> Blocker edges
4671                 self._blocker_parents = digraph()
4672                 # Contains only irrelevant Package -> Blocker edges
4673                 self._irrelevant_blockers = digraph()
4674                 # Contains only unsolvable Package -> Blocker edges
4675                 self._unsolvable_blockers = digraph()
4676                 # Contains all Blocker -> Blocked Package edges
4677                 self._blocked_pkgs = digraph()
4678                 # Contains world packages that have been protected from
4679                 # uninstallation but may not have been added to the graph
4680                 # if the graph is not complete yet.
4681                 self._blocked_world_pkgs = {}
4682                 self._slot_collision_info = {}
4683                 # Slot collision nodes are not allowed to block other packages since
4684                 # blocker validation is only able to account for one package per slot.
4685                 self._slot_collision_nodes = set()
4686                 self._parent_atoms = {}
4687                 self._slot_conflict_parent_atoms = set()
4688                 self._serialized_tasks_cache = None
4689                 self._scheduler_graph = None
4690                 self._displayed_list = None
4691                 self._pprovided_args = []
4692                 self._missing_args = []
4693                 self._masked_installed = set()
4694                 self._unsatisfied_deps_for_display = []
4695                 self._unsatisfied_blockers_for_display = None
4696                 self._circular_deps_for_display = None
4697                 self._dep_stack = []
4698                 self._unsatisfied_deps = []
4699                 self._initially_unsatisfied_deps = []
4700                 self._ignored_deps = []
4701                 self._required_set_names = set(["system", "world"])
4702                 self._select_atoms = self._select_atoms_highest_available
4703                 self._select_package = self._select_pkg_highest_available
4704                 self._highest_pkg_cache = {}
4705
4706         def _show_slot_collision_notice(self):
4707                 """Show an informational message advising the user to mask one of the
4708                 the packages. In some cases it may be possible to resolve this
4709                 automatically, but support for backtracking (removal nodes that have
4710                 already been selected) will be required in order to handle all possible
4711                 cases.
4712                 """
4713
4714                 if not self._slot_collision_info:
4715                         return
4716
4717                 self._show_merge_list()
4718
4719                 msg = []
4720                 msg.append("\n!!! Multiple package instances within a single " + \
4721                         "package slot have been pulled\n")
4722                 msg.append("!!! into the dependency graph, resulting" + \
4723                         " in a slot conflict:\n\n")
4724                 indent = "  "
4725                 # Max number of parents shown, to avoid flooding the display.
4726                 max_parents = 3
4727                 explanation_columns = 70
4728                 explanations = 0
4729                 for (slot_atom, root), slot_nodes \
4730                         in self._slot_collision_info.iteritems():
4731                         msg.append(str(slot_atom))
4732                         msg.append("\n\n")
4733
4734                         for node in slot_nodes:
4735                                 msg.append(indent)
4736                                 msg.append(str(node))
4737                                 parent_atoms = self._parent_atoms.get(node)
4738                                 if parent_atoms:
4739                                         pruned_list = set()
4740                                         # Prefer conflict atoms over others.
4741                                         for parent_atom in parent_atoms:
4742                                                 if len(pruned_list) >= max_parents:
4743                                                         break
4744                                                 if parent_atom in self._slot_conflict_parent_atoms:
4745                                                         pruned_list.add(parent_atom)
4746
4747                                         # If this package was pulled in by conflict atoms then
4748                                         # show those alone since those are the most interesting.
4749                                         if not pruned_list:
4750                                                 # When generating the pruned list, prefer instances
4751                                                 # of DependencyArg over instances of Package.
4752                                                 for parent_atom in parent_atoms:
4753                                                         if len(pruned_list) >= max_parents:
4754                                                                 break
4755                                                         parent, atom = parent_atom
4756                                                         if isinstance(parent, DependencyArg):
4757                                                                 pruned_list.add(parent_atom)
4758                                                 # Prefer Packages instances that themselves have been
4759                                                 # pulled into collision slots.
4760                                                 for parent_atom in parent_atoms:
4761                                                         if len(pruned_list) >= max_parents:
4762                                                                 break
4763                                                         parent, atom = parent_atom
4764                                                         if isinstance(parent, Package) and \
4765                                                                 (parent.slot_atom, parent.root) \
4766                                                                 in self._slot_collision_info:
4767                                                                 pruned_list.add(parent_atom)
4768                                                 for parent_atom in parent_atoms:
4769                                                         if len(pruned_list) >= max_parents:
4770                                                                 break
4771                                                         pruned_list.add(parent_atom)
4772                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4773                                         parent_atoms = pruned_list
4774                                         msg.append(" pulled in by\n")
4775                                         for parent_atom in parent_atoms:
4776                                                 parent, atom = parent_atom
4777                                                 msg.append(2*indent)
4778                                                 if isinstance(parent,
4779                                                         (PackageArg, AtomArg)):
4780                                                         # For PackageArg and AtomArg types, it's
4781                                                         # redundant to display the atom attribute.
4782                                                         msg.append(str(parent))
4783                                                 else:
4784                                                         # Display the specific atom from SetArg or
4785                                                         # Package types.
4786                                                         msg.append("%s required by %s" % (atom, parent))
4787                                                 msg.append("\n")
4788                                         if omitted_parents:
4789                                                 msg.append(2*indent)
4790                                                 msg.append("(and %d more)\n" % omitted_parents)
4791                                 else:
4792                                         msg.append(" (no parents)\n")
4793                                 msg.append("\n")
4794                         explanation = self._slot_conflict_explanation(slot_nodes)
4795                         if explanation:
4796                                 explanations += 1
4797                                 msg.append(indent + "Explanation:\n\n")
4798                                 for line in textwrap.wrap(explanation, explanation_columns):
4799                                         msg.append(2*indent + line + "\n")
4800                                 msg.append("\n")
4801                 msg.append("\n")
4802                 sys.stderr.write("".join(msg))
4803                 sys.stderr.flush()
4804
4805                 explanations_for_all = explanations == len(self._slot_collision_info)
4806
4807                 if explanations_for_all or "--quiet" in self.myopts:
4808                         return
4809
4810                 msg = []
4811                 msg.append("It may be possible to solve this problem ")
4812                 msg.append("by using package.mask to prevent one of ")
4813                 msg.append("those packages from being selected. ")
4814                 msg.append("However, it is also possible that conflicting ")
4815                 msg.append("dependencies exist such that they are impossible to ")
4816                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4817                 msg.append("the dependencies of two different packages, then those ")
4818                 msg.append("packages can not be installed simultaneously.")
4819
4820                 from formatter import AbstractFormatter, DumbWriter
4821                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4822                 for x in msg:
4823                         f.add_flowing_data(x)
4824                 f.end_paragraph(1)
4825
4826                 msg = []
4827                 msg.append("For more information, see MASKED PACKAGES ")
4828                 msg.append("section in the emerge man page or refer ")
4829                 msg.append("to the Gentoo Handbook.")
4830                 for x in msg:
4831                         f.add_flowing_data(x)
4832                 f.end_paragraph(1)
4833                 f.writer.flush()
4834
4835         def _slot_conflict_explanation(self, slot_nodes):
4836                 """
4837                 When a slot conflict occurs due to USE deps, there are a few
4838                 different cases to consider:
4839
4840                 1) New USE are correctly set but --newuse wasn't requested so an
4841                    installed package with incorrect USE happened to get pulled
4842                    into graph before the new one.
4843
4844                 2) New USE are incorrectly set but an installed package has correct
4845                    USE so it got pulled into the graph, and a new instance also got
4846                    pulled in due to --newuse or an upgrade.
4847
4848                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4849                    and multiple package instances got pulled into the same slot to
4850                    satisfy the conflicting deps.
4851
4852                 Currently, explanations and suggested courses of action are generated
4853                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4854                 """
4855
4856                 if len(slot_nodes) != 2:
4857                         # Suggestions are only implemented for
4858                         # conflicts between two packages.
4859                         return None
4860
4861                 all_conflict_atoms = self._slot_conflict_parent_atoms
4862                 matched_node = None
4863                 matched_atoms = None
4864                 unmatched_node = None
4865                 for node in slot_nodes:
4866                         parent_atoms = self._parent_atoms.get(node)
4867                         if not parent_atoms:
4868                                 # Normally, there are always parent atoms. If there are
4869                                 # none then something unexpected is happening and there's
4870                                 # currently no suggestion for this case.
4871                                 return None
4872                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4873                         for parent_atom in conflict_atoms:
4874                                 parent, atom = parent_atom
4875                                 if not atom.use:
4876                                         # Suggestions are currently only implemented for cases
4877                                         # in which all conflict atoms have USE deps.
4878                                         return None
4879                         if conflict_atoms:
4880                                 if matched_node is not None:
4881                                         # If conflict atoms match multiple nodes
4882                                         # then there's no suggestion.
4883                                         return None
4884                                 matched_node = node
4885                                 matched_atoms = conflict_atoms
4886                         else:
4887                                 if unmatched_node is not None:
4888                                         # Neither node is matched by conflict atoms, and
4889                                         # there is no suggestion for this case.
4890                                         return None
4891                                 unmatched_node = node
4892
4893                 if matched_node is None or unmatched_node is None:
4894                         # This shouldn't happen.
4895                         return None
4896
4897                 if unmatched_node.installed and not matched_node.installed and \
4898                         unmatched_node.cpv == matched_node.cpv:
4899                         # If the conflicting packages are the same version then
4900                         # --newuse should be all that's needed. If they are different
4901                         # versions then there's some other problem.
4902                         return "New USE are correctly set, but --newuse wasn't" + \
4903                                 " requested, so an installed package with incorrect USE " + \
4904                                 "happened to get pulled into the dependency graph. " + \
4905                                 "In order to solve " + \
4906                                 "this, either specify the --newuse option or explicitly " + \
4907                                 " reinstall '%s'." % matched_node.slot_atom
4908
4909                 if matched_node.installed and not unmatched_node.installed:
4910                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4911                         explanation = ("New USE for '%s' are incorrectly set. " + \
4912                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4913                                 (matched_node.slot_atom, atoms[0])
4914                         if len(atoms) > 1:
4915                                 for atom in atoms[1:-1]:
4916                                         explanation += ", '%s'" % (atom,)
4917                                 if len(atoms) > 2:
4918                                         explanation += ","
4919                                 explanation += " and '%s'" % (atoms[-1],)
4920                         explanation += "."
4921                         return explanation
4922
4923                 return None
4924
4925         def _process_slot_conflicts(self):
4926                 """
4927                 Process slot conflict data to identify specific atoms which
4928                 lead to conflict. These atoms only match a subset of the
4929                 packages that have been pulled into a given slot.
4930                 """
4931                 for (slot_atom, root), slot_nodes \
4932                         in self._slot_collision_info.iteritems():
4933
4934                         all_parent_atoms = set()
4935                         for pkg in slot_nodes:
4936                                 parent_atoms = self._parent_atoms.get(pkg)
4937                                 if not parent_atoms:
4938                                         continue
4939                                 all_parent_atoms.update(parent_atoms)
4940
4941                         for pkg in slot_nodes:
4942                                 parent_atoms = self._parent_atoms.get(pkg)
4943                                 if parent_atoms is None:
4944                                         parent_atoms = set()
4945                                         self._parent_atoms[pkg] = parent_atoms
4946                                 for parent_atom in all_parent_atoms:
4947                                         if parent_atom in parent_atoms:
4948                                                 continue
4949                                         # Use package set for matching since it will match via
4950                                         # PROVIDE when necessary, while match_from_list does not.
4951                                         parent, atom = parent_atom
4952                                         atom_set = InternalPackageSet(
4953                                                 initial_atoms=(atom,))
4954                                         if atom_set.findAtomForPackage(pkg):
4955                                                 parent_atoms.add(parent_atom)
4956                                         else:
4957                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4958
4959         def _reinstall_for_flags(self, forced_flags,
4960                 orig_use, orig_iuse, cur_use, cur_iuse):
4961                 """Return a set of flags that trigger reinstallation, or None if there
4962                 are no such flags."""
4963                 if "--newuse" in self.myopts:
4964                         flags = set(orig_iuse.symmetric_difference(
4965                                 cur_iuse).difference(forced_flags))
4966                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4967                                 cur_iuse.intersection(cur_use)))
4968                         if flags:
4969                                 return flags
4970                 elif "changed-use" == self.myopts.get("--reinstall"):
4971                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4972                                 cur_iuse.intersection(cur_use))
4973                         if flags:
4974                                 return flags
4975                 return None
4976
4977         def _create_graph(self, allow_unsatisfied=False):
4978                 dep_stack = self._dep_stack
4979                 while dep_stack:
4980                         self.spinner.update()
4981                         dep = dep_stack.pop()
4982                         if isinstance(dep, Package):
4983                                 if not self._add_pkg_deps(dep,
4984                                         allow_unsatisfied=allow_unsatisfied):
4985                                         return 0
4986                                 continue
4987                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4988                                 return 0
4989                 return 1
4990
4991         def _add_dep(self, dep, allow_unsatisfied=False):
4992                 debug = "--debug" in self.myopts
4993                 buildpkgonly = "--buildpkgonly" in self.myopts
4994                 nodeps = "--nodeps" in self.myopts
4995                 empty = "empty" in self.myparams
4996                 deep = "deep" in self.myparams
4997                 update = "--update" in self.myopts and dep.depth <= 1
4998                 if dep.blocker:
4999                         if not buildpkgonly and \
5000                                 not nodeps and \
5001                                 dep.parent not in self._slot_collision_nodes:
5002                                 if dep.parent.onlydeps:
5003                                         # It's safe to ignore blockers if the
5004                                         # parent is an --onlydeps node.
5005                                         return 1
5006                                 # The blocker applies to the root where
5007                                 # the parent is or will be installed.
5008                                 blocker = Blocker(atom=dep.atom,
5009                                         eapi=dep.parent.metadata["EAPI"],
5010                                         root=dep.parent.root)
5011                                 self._blocker_parents.add(blocker, dep.parent)
5012                         return 1
5013                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5014                         onlydeps=dep.onlydeps)
5015                 if not dep_pkg:
5016                         if dep.priority.optional:
5017                                 # This could be an unecessary build-time dep
5018                                 # pulled in by --with-bdeps=y.
5019                                 return 1
5020                         if allow_unsatisfied:
5021                                 self._unsatisfied_deps.append(dep)
5022                                 return 1
5023                         self._unsatisfied_deps_for_display.append(
5024                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5025                         return 0
5026                 # In some cases, dep_check will return deps that shouldn't
5027                 # be proccessed any further, so they are identified and
5028                 # discarded here. Try to discard as few as possible since
5029                 # discarded dependencies reduce the amount of information
5030                 # available for optimization of merge order.
5031                 if dep.priority.satisfied and \
5032                         not dep_pkg.installed and \
5033                         not (existing_node or empty or deep or update):
5034                         myarg = None
5035                         if dep.root == self.target_root:
5036                                 try:
5037                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5038                                 except StopIteration:
5039                                         pass
5040                                 except portage.exception.InvalidDependString:
5041                                         if not dep_pkg.installed:
5042                                                 # This shouldn't happen since the package
5043                                                 # should have been masked.
5044                                                 raise
5045                         if not myarg:
5046                                 self._ignored_deps.append(dep)
5047                                 return 1
5048
5049                 if not self._add_pkg(dep_pkg, dep):
5050                         return 0
5051                 return 1
5052
5053         def _add_pkg(self, pkg, dep):
5054                 myparent = None
5055                 priority = None
5056                 depth = 0
5057                 if dep is None:
5058                         dep = Dependency()
5059                 else:
5060                         myparent = dep.parent
5061                         priority = dep.priority
5062                         depth = dep.depth
5063                 if priority is None:
5064                         priority = DepPriority()
5065                 """
5066                 Fills the digraph with nodes comprised of packages to merge.
5067                 mybigkey is the package spec of the package to merge.
5068                 myparent is the package depending on mybigkey ( or None )
5069                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5070                         Think --onlydeps, we need to ignore packages in that case.
5071                 #stuff to add:
5072                 #SLOT-aware emerge
5073                 #IUSE-aware emerge -> USE DEP aware depgraph
5074                 #"no downgrade" emerge
5075                 """
5076                 # Ensure that the dependencies of the same package
5077                 # are never processed more than once.
5078                 previously_added = pkg in self.digraph
5079
5080                 # select the correct /var database that we'll be checking against
5081                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5082                 pkgsettings = self.pkgsettings[pkg.root]
5083
5084                 arg_atoms = None
5085                 if True:
5086                         try:
5087                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5088                         except portage.exception.InvalidDependString, e:
5089                                 if not pkg.installed:
5090                                         show_invalid_depstring_notice(
5091                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5092                                         return 0
5093                                 del e
5094
5095                 if not pkg.onlydeps:
5096                         if not pkg.installed and \
5097                                 "empty" not in self.myparams and \
5098                                 vardbapi.match(pkg.slot_atom):
5099                                 # Increase the priority of dependencies on packages that
5100                                 # are being rebuilt. This optimizes merge order so that
5101                                 # dependencies are rebuilt/updated as soon as possible,
5102                                 # which is needed especially when emerge is called by
5103                                 # revdep-rebuild since dependencies may be affected by ABI
5104                                 # breakage that has rendered them useless. Don't adjust
5105                                 # priority here when in "empty" mode since all packages
5106                                 # are being merged in that case.
5107                                 priority.rebuild = True
5108
5109                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5110                         slot_collision = False
5111                         if existing_node:
5112                                 existing_node_matches = pkg.cpv == existing_node.cpv
5113                                 if existing_node_matches and \
5114                                         pkg != existing_node and \
5115                                         dep.atom is not None:
5116                                         # Use package set for matching since it will match via
5117                                         # PROVIDE when necessary, while match_from_list does not.
5118                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5119                                         if not atom_set.findAtomForPackage(existing_node):
5120                                                 existing_node_matches = False
5121                                 if existing_node_matches:
5122                                         # The existing node can be reused.
5123                                         if arg_atoms:
5124                                                 for parent_atom in arg_atoms:
5125                                                         parent, atom = parent_atom
5126                                                         self.digraph.add(existing_node, parent,
5127                                                                 priority=priority)
5128                                                         self._add_parent_atom(existing_node, parent_atom)
5129                                         # If a direct circular dependency is not an unsatisfied
5130                                         # buildtime dependency then drop it here since otherwise
5131                                         # it can skew the merge order calculation in an unwanted
5132                                         # way.
5133                                         if existing_node != myparent or \
5134                                                 (priority.buildtime and not priority.satisfied):
5135                                                 self.digraph.addnode(existing_node, myparent,
5136                                                         priority=priority)
5137                                                 if dep.atom is not None and dep.parent is not None:
5138                                                         self._add_parent_atom(existing_node,
5139                                                                 (dep.parent, dep.atom))
5140                                         return 1
5141                                 else:
5142
5143                                         # A slot collision has occurred.  Sometimes this coincides
5144                                         # with unresolvable blockers, so the slot collision will be
5145                                         # shown later if there are no unresolvable blockers.
5146                                         self._add_slot_conflict(pkg)
5147                                         slot_collision = True
5148
5149                         if slot_collision:
5150                                 # Now add this node to the graph so that self.display()
5151                                 # can show use flags and --tree portage.output.  This node is
5152                                 # only being partially added to the graph.  It must not be
5153                                 # allowed to interfere with the other nodes that have been
5154                                 # added.  Do not overwrite data for existing nodes in
5155                                 # self.mydbapi since that data will be used for blocker
5156                                 # validation.
5157                                 # Even though the graph is now invalid, continue to process
5158                                 # dependencies so that things like --fetchonly can still
5159                                 # function despite collisions.
5160                                 pass
5161                         elif not previously_added:
5162                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5163                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5164                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5165
5166                         if not pkg.installed:
5167                                 # Allow this package to satisfy old-style virtuals in case it
5168                                 # doesn't already. Any pre-existing providers will be preferred
5169                                 # over this one.
5170                                 try:
5171                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5172                                         # For consistency, also update the global virtuals.
5173                                         settings = self.roots[pkg.root].settings
5174                                         settings.unlock()
5175                                         settings.setinst(pkg.cpv, pkg.metadata)
5176                                         settings.lock()
5177                                 except portage.exception.InvalidDependString, e:
5178                                         show_invalid_depstring_notice(
5179                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5180                                         del e
5181                                         return 0
5182
5183                 if arg_atoms:
5184                         self._set_nodes.add(pkg)
5185
5186                 # Do this even when addme is False (--onlydeps) so that the
5187                 # parent/child relationship is always known in case
5188                 # self._show_slot_collision_notice() needs to be called later.
5189                 self.digraph.add(pkg, myparent, priority=priority)
5190                 if dep.atom is not None and dep.parent is not None:
5191                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5192
5193                 if arg_atoms:
5194                         for parent_atom in arg_atoms:
5195                                 parent, atom = parent_atom
5196                                 self.digraph.add(pkg, parent, priority=priority)
5197                                 self._add_parent_atom(pkg, parent_atom)
5198
5199                 """ This section determines whether we go deeper into dependencies or not.
5200                     We want to go deeper on a few occasions:
5201                     Installing package A, we need to make sure package A's deps are met.
5202                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5203                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5204                 """
5205                 dep_stack = self._dep_stack
5206                 if "recurse" not in self.myparams:
5207                         return 1
5208                 elif pkg.installed and \
5209                         "deep" not in self.myparams:
5210                         dep_stack = self._ignored_deps
5211
5212                 self.spinner.update()
5213
5214                 if arg_atoms:
5215                         depth = 0
5216                 pkg.depth = depth
5217                 if not previously_added:
5218                         dep_stack.append(pkg)
5219                 return 1
5220
5221         def _add_parent_atom(self, pkg, parent_atom):
5222                 parent_atoms = self._parent_atoms.get(pkg)
5223                 if parent_atoms is None:
5224                         parent_atoms = set()
5225                         self._parent_atoms[pkg] = parent_atoms
5226                 parent_atoms.add(parent_atom)
5227
5228         def _add_slot_conflict(self, pkg):
5229                 self._slot_collision_nodes.add(pkg)
5230                 slot_key = (pkg.slot_atom, pkg.root)
5231                 slot_nodes = self._slot_collision_info.get(slot_key)
5232                 if slot_nodes is None:
5233                         slot_nodes = set()
5234                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5235                         self._slot_collision_info[slot_key] = slot_nodes
5236                 slot_nodes.add(pkg)
5237
5238         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5239
5240                 mytype = pkg.type_name
5241                 myroot = pkg.root
5242                 mykey = pkg.cpv
5243                 metadata = pkg.metadata
5244                 myuse = pkg.use.enabled
5245                 jbigkey = pkg
5246                 depth = pkg.depth + 1
5247                 removal_action = "remove" in self.myparams
5248
5249                 edepend={}
5250                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5251                 for k in depkeys:
5252                         edepend[k] = metadata[k]
5253
5254                 if not pkg.built and \
5255                         "--buildpkgonly" in self.myopts and \
5256                         "deep" not in self.myparams and \
5257                         "empty" not in self.myparams:
5258                         edepend["RDEPEND"] = ""
5259                         edepend["PDEPEND"] = ""
5260                 bdeps_optional = False
5261
5262                 if pkg.built and not removal_action:
5263                         if self.myopts.get("--with-bdeps", "n") == "y":
5264                                 # Pull in build time deps as requested, but marked them as
5265                                 # "optional" since they are not strictly required. This allows
5266                                 # more freedom in the merge order calculation for solving
5267                                 # circular dependencies. Don't convert to PDEPEND since that
5268                                 # could make --with-bdeps=y less effective if it is used to
5269                                 # adjust merge order to prevent built_with_use() calls from
5270                                 # failing.
5271                                 bdeps_optional = True
5272                         else:
5273                                 # built packages do not have build time dependencies.
5274                                 edepend["DEPEND"] = ""
5275
5276                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5277                         edepend["DEPEND"] = ""
5278
5279                 bdeps_root = "/"
5280                 if self.target_root != "/":
5281                         if "--root-deps" in self.myopts:
5282                                         bdeps_root = myroot
5283                         if "--rdeps-only" in self.myopts:
5284                                         bdeps_root = "/"
5285                                         edepend["DEPEND"] = ""
5286
5287                 deps = (
5288                         (bdeps_root, edepend["DEPEND"],
5289                                 self._priority(buildtime=(not bdeps_optional),
5290                                 optional=bdeps_optional)),
5291                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5292                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5293                 )
5294
5295                 debug = "--debug" in self.myopts
5296                 strict = mytype != "installed"
5297                 try:
5298                         for dep_root, dep_string, dep_priority in deps:
5299                                 if not dep_string:
5300                                         continue
5301                                 if debug:
5302                                         print
5303                                         print "Parent:   ", jbigkey
5304                                         print "Depstring:", dep_string
5305                                         print "Priority:", dep_priority
5306                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5307                                 try:
5308                                         selected_atoms = self._select_atoms(dep_root,
5309                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5310                                                 priority=dep_priority)
5311                                 except portage.exception.InvalidDependString, e:
5312                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5313                                         return 0
5314                                 if debug:
5315                                         print "Candidates:", selected_atoms
5316
5317                                 for atom in selected_atoms:
5318                                         try:
5319
5320                                                 atom = portage.dep.Atom(atom)
5321
5322                                                 mypriority = dep_priority.copy()
5323                                                 if not atom.blocker and vardb.match(atom):
5324                                                         mypriority.satisfied = True
5325
5326                                                 if not self._add_dep(Dependency(atom=atom,
5327                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5328                                                         priority=mypriority, root=dep_root),
5329                                                         allow_unsatisfied=allow_unsatisfied):
5330                                                         return 0
5331
5332                                         except portage.exception.InvalidAtom, e:
5333                                                 show_invalid_depstring_notice(
5334                                                         pkg, dep_string, str(e))
5335                                                 del e
5336                                                 if not pkg.installed:
5337                                                         return 0
5338
5339                                 if debug:
5340                                         print "Exiting...", jbigkey
5341                 except portage.exception.AmbiguousPackageName, e:
5342                         pkgs = e.args[0]
5343                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5344                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5345                         for cpv in pkgs:
5346                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5347                         portage.writemsg("\n", noiselevel=-1)
5348                         if mytype == "binary":
5349                                 portage.writemsg(
5350                                         "!!! This binary package cannot be installed: '%s'\n" % \
5351                                         mykey, noiselevel=-1)
5352                         elif mytype == "ebuild":
5353                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5354                                 myebuild, mylocation = portdb.findname2(mykey)
5355                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5356                                         "'%s'\n" % myebuild, noiselevel=-1)
5357                         portage.writemsg("!!! Please notify the package maintainer " + \
5358                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5359                         return 0
5360                 return 1
5361
5362         def _priority(self, **kwargs):
5363                 if "remove" in self.myparams:
5364                         priority_constructor = UnmergeDepPriority
5365                 else:
5366                         priority_constructor = DepPriority
5367                 return priority_constructor(**kwargs)
5368
5369         def _dep_expand(self, root_config, atom_without_category):
5370                 """
5371                 @param root_config: a root config instance
5372                 @type root_config: RootConfig
5373                 @param atom_without_category: an atom without a category component
5374                 @type atom_without_category: String
5375                 @rtype: list
5376                 @returns: a list of atoms containing categories (possibly empty)
5377                 """
5378                 null_cp = portage.dep_getkey(insert_category_into_atom(
5379                         atom_without_category, "null"))
5380                 cat, atom_pn = portage.catsplit(null_cp)
5381
5382                 dbs = self._filtered_trees[root_config.root]["dbs"]
5383                 categories = set()
5384                 for db, pkg_type, built, installed, db_keys in dbs:
5385                         for cat in db.categories:
5386                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5387                                         categories.add(cat)
5388
5389                 deps = []
5390                 for cat in categories:
5391                         deps.append(insert_category_into_atom(
5392                                 atom_without_category, cat))
5393                 return deps
5394
5395         def _have_new_virt(self, root, atom_cp):
5396                 ret = False
5397                 for db, pkg_type, built, installed, db_keys in \
5398                         self._filtered_trees[root]["dbs"]:
5399                         if db.cp_list(atom_cp):
5400                                 ret = True
5401                                 break
5402                 return ret
5403
5404         def _iter_atoms_for_pkg(self, pkg):
5405                 # TODO: add multiple $ROOT support
5406                 if pkg.root != self.target_root:
5407                         return
5408                 atom_arg_map = self._atom_arg_map
5409                 root_config = self.roots[pkg.root]
5410                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5411                         atom_cp = portage.dep_getkey(atom)
5412                         if atom_cp != pkg.cp and \
5413                                 self._have_new_virt(pkg.root, atom_cp):
5414                                 continue
5415                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5416                         visible_pkgs.reverse() # descending order
5417                         higher_slot = None
5418                         for visible_pkg in visible_pkgs:
5419                                 if visible_pkg.cp != atom_cp:
5420                                         continue
5421                                 if pkg >= visible_pkg:
5422                                         # This is descending order, and we're not
5423                                         # interested in any versions <= pkg given.
5424                                         break
5425                                 if pkg.slot_atom != visible_pkg.slot_atom:
5426                                         higher_slot = visible_pkg
5427                                         break
5428                         if higher_slot is not None:
5429                                 continue
5430                         for arg in atom_arg_map[(atom, pkg.root)]:
5431                                 if isinstance(arg, PackageArg) and \
5432                                         arg.package != pkg:
5433                                         continue
5434                                 yield arg, atom
5435
5436         def select_files(self, myfiles):
5437                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5438                 appropriate depgraph and return a favorite list."""
5439                 debug = "--debug" in self.myopts
5440                 root_config = self.roots[self.target_root]
5441                 sets = root_config.sets
5442                 getSetAtoms = root_config.setconfig.getSetAtoms
5443                 myfavorites=[]
5444                 myroot = self.target_root
5445                 dbs = self._filtered_trees[myroot]["dbs"]
5446                 vardb = self.trees[myroot]["vartree"].dbapi
5447                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5448                 portdb = self.trees[myroot]["porttree"].dbapi
5449                 bindb = self.trees[myroot]["bintree"].dbapi
5450                 pkgsettings = self.pkgsettings[myroot]
5451                 args = []
5452                 onlydeps = "--onlydeps" in self.myopts
5453                 lookup_owners = []
5454                 for x in myfiles:
5455                         ext = os.path.splitext(x)[1]
5456                         if ext==".tbz2":
5457                                 if not os.path.exists(x):
5458                                         if os.path.exists(
5459                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5460                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5461                                         elif os.path.exists(
5462                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5463                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5464                                         else:
5465                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5466                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5467                                                 return 0, myfavorites
5468                                 mytbz2=portage.xpak.tbz2(x)
5469                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5470                                 if os.path.realpath(x) != \
5471                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5472                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5473                                         return 0, myfavorites
5474                                 db_keys = list(bindb._aux_cache_keys)
5475                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5476                                 pkg = Package(type_name="binary", root_config=root_config,
5477                                         cpv=mykey, built=True, metadata=metadata,
5478                                         onlydeps=onlydeps)
5479                                 self._pkg_cache[pkg] = pkg
5480                                 args.append(PackageArg(arg=x, package=pkg,
5481                                         root_config=root_config))
5482                         elif ext==".ebuild":
5483                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5484                                 pkgdir = os.path.dirname(ebuild_path)
5485                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5486                                 cp = pkgdir[len(tree_root)+1:]
5487                                 e = portage.exception.PackageNotFound(
5488                                         ("%s is not in a valid portage tree " + \
5489                                         "hierarchy or does not exist") % x)
5490                                 if not portage.isvalidatom(cp):
5491                                         raise e
5492                                 cat = portage.catsplit(cp)[0]
5493                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5494                                 if not portage.isvalidatom("="+mykey):
5495                                         raise e
5496                                 ebuild_path = portdb.findname(mykey)
5497                                 if ebuild_path:
5498                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5499                                                 cp, os.path.basename(ebuild_path)):
5500                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5501                                                 return 0, myfavorites
5502                                         if mykey not in portdb.xmatch(
5503                                                 "match-visible", portage.dep_getkey(mykey)):
5504                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5505                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5506                                                 print colorize("BAD", "*** page for details.")
5507                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5508                                                         "Continuing...")
5509                                 else:
5510                                         raise portage.exception.PackageNotFound(
5511                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5512                                 db_keys = list(portdb._aux_cache_keys)
5513                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5514                                 pkg = Package(type_name="ebuild", root_config=root_config,
5515                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5516                                 pkgsettings.setcpv(pkg)
5517                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5518                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5519                                 self._pkg_cache[pkg] = pkg
5520                                 args.append(PackageArg(arg=x, package=pkg,
5521                                         root_config=root_config))
5522                         elif x.startswith(os.path.sep):
5523                                 if not x.startswith(myroot):
5524                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5525                                                 " $ROOT.\n") % x, noiselevel=-1)
5526                                         return 0, []
5527                                 # Queue these up since it's most efficient to handle
5528                                 # multiple files in a single iter_owners() call.
5529                                 lookup_owners.append(x)
5530                         else:
5531                                 if x in ("system", "world"):
5532                                         x = SETPREFIX + x
5533                                 if x.startswith(SETPREFIX):
5534                                         s = x[len(SETPREFIX):]
5535                                         if s not in sets:
5536                                                 raise portage.exception.PackageSetNotFound(s)
5537                                         if s in self._sets:
5538                                                 continue
5539                                         # Recursively expand sets so that containment tests in
5540                                         # self._get_parent_sets() properly match atoms in nested
5541                                         # sets (like if world contains system).
5542                                         expanded_set = InternalPackageSet(
5543                                                 initial_atoms=getSetAtoms(s))
5544                                         self._sets[s] = expanded_set
5545                                         args.append(SetArg(arg=x, set=expanded_set,
5546                                                 root_config=root_config))
5547                                         continue
5548                                 if not is_valid_package_atom(x):
5549                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5550                                                 noiselevel=-1)
5551                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5552                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5553                                         return (0,[])
5554                                 # Don't expand categories or old-style virtuals here unless
5555                                 # necessary. Expansion of old-style virtuals here causes at
5556                                 # least the following problems:
5557                                 #   1) It's more difficult to determine which set(s) an atom
5558                                 #      came from, if any.
5559                                 #   2) It takes away freedom from the resolver to choose other
5560                                 #      possible expansions when necessary.
5561                                 if "/" in x:
5562                                         args.append(AtomArg(arg=x, atom=x,
5563                                                 root_config=root_config))
5564                                         continue
5565                                 expanded_atoms = self._dep_expand(root_config, x)
5566                                 installed_cp_set = set()
5567                                 for atom in expanded_atoms:
5568                                         atom_cp = portage.dep_getkey(atom)
5569                                         if vardb.cp_list(atom_cp):
5570                                                 installed_cp_set.add(atom_cp)
5571                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5572                                         installed_cp = iter(installed_cp_set).next()
5573                                         expanded_atoms = [atom for atom in expanded_atoms \
5574                                                 if portage.dep_getkey(atom) == installed_cp]
5575
5576                                 if len(expanded_atoms) > 1:
5577                                         print
5578                                         print
5579                                         ambiguous_package_name(x, expanded_atoms, root_config,
5580                                                 self.spinner, self.myopts)
5581                                         return False, myfavorites
5582                                 if expanded_atoms:
5583                                         atom = expanded_atoms[0]
5584                                 else:
5585                                         null_atom = insert_category_into_atom(x, "null")
5586                                         null_cp = portage.dep_getkey(null_atom)
5587                                         cat, atom_pn = portage.catsplit(null_cp)
5588                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5589                                         if virts_p:
5590                                                 # Allow the depgraph to choose which virtual.
5591                                                 atom = insert_category_into_atom(x, "virtual")
5592                                         else:
5593                                                 atom = insert_category_into_atom(x, "null")
5594
5595                                 args.append(AtomArg(arg=x, atom=atom,
5596                                         root_config=root_config))
5597
5598                 if lookup_owners:
5599                         relative_paths = []
5600                         search_for_multiple = False
5601                         if len(lookup_owners) > 1:
5602                                 search_for_multiple = True
5603
5604                         for x in lookup_owners:
5605                                 if not search_for_multiple and os.path.isdir(x):
5606                                         search_for_multiple = True
5607                                 relative_paths.append(x[len(myroot):])
5608
5609                         owners = set()
5610                         for pkg, relative_path in \
5611                                 real_vardb._owners.iter_owners(relative_paths):
5612                                 owners.add(pkg.mycpv)
5613                                 if not search_for_multiple:
5614                                         break
5615
5616                         if not owners:
5617                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5618                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5619                                 return 0, []
5620
5621                         for cpv in owners:
5622                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5623                                 if not slot:
5624                                         # portage now masks packages with missing slot, but it's
5625                                         # possible that one was installed by an older version
5626                                         atom = portage.cpv_getkey(cpv)
5627                                 else:
5628                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5629                                 args.append(AtomArg(arg=atom, atom=atom,
5630                                         root_config=root_config))
5631
5632                 if "--update" in self.myopts:
5633                         # In some cases, the greedy slots behavior can pull in a slot that
5634                         # the user would want to uninstall due to it being blocked by a
5635                         # newer version in a different slot. Therefore, it's necessary to
5636                         # detect and discard any that should be uninstalled. Each time
5637                         # that arguments are updated, package selections are repeated in
5638                         # order to ensure consistency with the current arguments:
5639                         #
5640                         #  1) Initialize args
5641                         #  2) Select packages and generate initial greedy atoms
5642                         #  3) Update args with greedy atoms
5643                         #  4) Select packages and generate greedy atoms again, while
5644                         #     accounting for any blockers between selected packages
5645                         #  5) Update args with revised greedy atoms
5646
5647                         self._set_args(args)
5648                         greedy_args = []
5649                         for arg in args:
5650                                 greedy_args.append(arg)
5651                                 if not isinstance(arg, AtomArg):
5652                                         continue
5653                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5654                                         greedy_args.append(
5655                                                 AtomArg(arg=arg.arg, atom=atom,
5656                                                         root_config=arg.root_config))
5657
5658                         self._set_args(greedy_args)
5659                         del greedy_args
5660
5661                         # Revise greedy atoms, accounting for any blockers
5662                         # between selected packages.
5663                         revised_greedy_args = []
5664                         for arg in args:
5665                                 revised_greedy_args.append(arg)
5666                                 if not isinstance(arg, AtomArg):
5667                                         continue
5668                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5669                                         blocker_lookahead=True):
5670                                         revised_greedy_args.append(
5671                                                 AtomArg(arg=arg.arg, atom=atom,
5672                                                         root_config=arg.root_config))
5673                         args = revised_greedy_args
5674                         del revised_greedy_args
5675
5676                 self._set_args(args)
5677
5678                 myfavorites = set(myfavorites)
5679                 for arg in args:
5680                         if isinstance(arg, (AtomArg, PackageArg)):
5681                                 myfavorites.add(arg.atom)
5682                         elif isinstance(arg, SetArg):
5683                                 myfavorites.add(arg.arg)
5684                 myfavorites = list(myfavorites)
5685
5686                 pprovideddict = pkgsettings.pprovideddict
5687                 if debug:
5688                         portage.writemsg("\n", noiselevel=-1)
5689                 # Order needs to be preserved since a feature of --nodeps
5690                 # is to allow the user to force a specific merge order.
5691                 args.reverse()
5692                 while args:
5693                         arg = args.pop()
5694                         for atom in arg.set:
5695                                 self.spinner.update()
5696                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5697                                         root=myroot, parent=arg)
5698                                 atom_cp = portage.dep_getkey(atom)
5699                                 try:
5700                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5701                                         if pprovided and portage.match_from_list(atom, pprovided):
5702                                                 # A provided package has been specified on the command line.
5703                                                 self._pprovided_args.append((arg, atom))
5704                                                 continue
5705                                         if isinstance(arg, PackageArg):
5706                                                 if not self._add_pkg(arg.package, dep) or \
5707                                                         not self._create_graph():
5708                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5709                                                                 "dependencies for %s\n") % arg.arg)
5710                                                         return 0, myfavorites
5711                                                 continue
5712                                         if debug:
5713                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5714                                                         (arg, atom), noiselevel=-1)
5715                                         pkg, existing_node = self._select_package(
5716                                                 myroot, atom, onlydeps=onlydeps)
5717                                         if not pkg:
5718                                                 if not (isinstance(arg, SetArg) and \
5719                                                         arg.name in ("system", "world")):
5720                                                         self._unsatisfied_deps_for_display.append(
5721                                                                 ((myroot, atom), {}))
5722                                                         return 0, myfavorites
5723                                                 self._missing_args.append((arg, atom))
5724                                                 continue
5725                                         if atom_cp != pkg.cp:
5726                                                 # For old-style virtuals, we need to repeat the
5727                                                 # package.provided check against the selected package.
5728                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5729                                                 pprovided = pprovideddict.get(pkg.cp)
5730                                                 if pprovided and \
5731                                                         portage.match_from_list(expanded_atom, pprovided):
5732                                                         # A provided package has been
5733                                                         # specified on the command line.
5734                                                         self._pprovided_args.append((arg, atom))
5735                                                         continue
5736                                         if pkg.installed and "selective" not in self.myparams:
5737                                                 self._unsatisfied_deps_for_display.append(
5738                                                         ((myroot, atom), {}))
5739                                                 # Previous behavior was to bail out in this case, but
5740                                                 # since the dep is satisfied by the installed package,
5741                                                 # it's more friendly to continue building the graph
5742                                                 # and just show a warning message. Therefore, only bail
5743                                                 # out here if the atom is not from either the system or
5744                                                 # world set.
5745                                                 if not (isinstance(arg, SetArg) and \
5746                                                         arg.name in ("system", "world")):
5747                                                         return 0, myfavorites
5748
5749                                         # Add the selected package to the graph as soon as possible
5750                                         # so that later dep_check() calls can use it as feedback
5751                                         # for making more consistent atom selections.
5752                                         if not self._add_pkg(pkg, dep):
5753                                                 if isinstance(arg, SetArg):
5754                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5755                                                                 "dependencies for %s from %s\n") % \
5756                                                                 (atom, arg.arg))
5757                                                 else:
5758                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5759                                                                 "dependencies for %s\n") % atom)
5760                                                 return 0, myfavorites
5761
5762                                 except portage.exception.MissingSignature, e:
5763                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5764                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5765                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5766                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5767                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5768                                         return 0, myfavorites
5769                                 except portage.exception.InvalidSignature, e:
5770                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5771                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5772                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5773                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5774                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5775                                         return 0, myfavorites
5776                                 except SystemExit, e:
5777                                         raise # Needed else can't exit
5778                                 except Exception, e:
5779                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5780                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5781                                         raise
5782
5783                 # Now that the root packages have been added to the graph,
5784                 # process the dependencies.
5785                 if not self._create_graph():
5786                         return 0, myfavorites
5787
5788                 missing=0
5789                 if "--usepkgonly" in self.myopts:
5790                         for xs in self.digraph.all_nodes():
5791                                 if not isinstance(xs, Package):
5792                                         continue
5793                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5794                                         if missing == 0:
5795                                                 print
5796                                         missing += 1
5797                                         print "Missing binary for:",xs[2]
5798
5799                 try:
5800                         self.altlist()
5801                 except self._unknown_internal_error:
5802                         return False, myfavorites
5803
5804                 # We're true here unless we are missing binaries.
5805                 return (not missing,myfavorites)
5806
5807         def _set_args(self, args):
5808                 """
5809                 Create the "args" package set from atoms and packages given as
5810                 arguments. This method can be called multiple times if necessary.
5811                 The package selection cache is automatically invalidated, since
5812                 arguments influence package selections.
5813                 """
5814                 args_set = self._sets["args"]
5815                 args_set.clear()
5816                 for arg in args:
5817                         if not isinstance(arg, (AtomArg, PackageArg)):
5818                                 continue
5819                         atom = arg.atom
5820                         if atom in args_set:
5821                                 continue
5822                         args_set.add(atom)
5823
5824                 self._set_atoms.clear()
5825                 self._set_atoms.update(chain(*self._sets.itervalues()))
5826                 atom_arg_map = self._atom_arg_map
5827                 atom_arg_map.clear()
5828                 for arg in args:
5829                         for atom in arg.set:
5830                                 atom_key = (atom, arg.root_config.root)
5831                                 refs = atom_arg_map.get(atom_key)
5832                                 if refs is None:
5833                                         refs = []
5834                                         atom_arg_map[atom_key] = refs
5835                                         if arg not in refs:
5836                                                 refs.append(arg)
5837
5838                 # Invalidate the package selection cache, since
5839                 # arguments influence package selections.
5840                 self._highest_pkg_cache.clear()
5841                 for trees in self._filtered_trees.itervalues():
5842                         trees["porttree"].dbapi._clear_cache()
5843
5844         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5845                 """
5846                 Return a list of slot atoms corresponding to installed slots that
5847                 differ from the slot of the highest visible match. When
5848                 blocker_lookahead is True, slot atoms that would trigger a blocker
5849                 conflict are automatically discarded, potentially allowing automatic
5850                 uninstallation of older slots when appropriate.
5851                 """
5852                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5853                 if highest_pkg is None:
5854                         return []
5855                 vardb = root_config.trees["vartree"].dbapi
5856                 slots = set()
5857                 for cpv in vardb.match(atom):
5858                         # don't mix new virtuals with old virtuals
5859                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5860                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5861
5862                 slots.add(highest_pkg.metadata["SLOT"])
5863                 if len(slots) == 1:
5864                         return []
5865                 greedy_pkgs = []
5866                 slots.remove(highest_pkg.metadata["SLOT"])
5867                 while slots:
5868                         slot = slots.pop()
5869                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5870                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5871                         if pkg is not None and \
5872                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5873                                 greedy_pkgs.append(pkg)
5874                 if not greedy_pkgs:
5875                         return []
5876                 if not blocker_lookahead:
5877                         return [pkg.slot_atom for pkg in greedy_pkgs]
5878
5879                 blockers = {}
5880                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5881                 for pkg in greedy_pkgs + [highest_pkg]:
5882                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5883                         try:
5884                                 atoms = self._select_atoms(
5885                                         pkg.root, dep_str, pkg.use.enabled,
5886                                         parent=pkg, strict=True)
5887                         except portage.exception.InvalidDependString:
5888                                 continue
5889                         blocker_atoms = (x for x in atoms if x.blocker)
5890                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5891
5892                 if highest_pkg not in blockers:
5893                         return []
5894
5895                 # filter packages with invalid deps
5896                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5897
5898                 # filter packages that conflict with highest_pkg
5899                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5900                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5901                         blockers[pkg].findAtomForPackage(highest_pkg))]
5902
5903                 if not greedy_pkgs:
5904                         return []
5905
5906                 # If two packages conflict, discard the lower version.
5907                 discard_pkgs = set()
5908                 greedy_pkgs.sort(reverse=True)
5909                 for i in xrange(len(greedy_pkgs) - 1):
5910                         pkg1 = greedy_pkgs[i]
5911                         if pkg1 in discard_pkgs:
5912                                 continue
5913                         for j in xrange(i + 1, len(greedy_pkgs)):
5914                                 pkg2 = greedy_pkgs[j]
5915                                 if pkg2 in discard_pkgs:
5916                                         continue
5917                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5918                                         blockers[pkg2].findAtomForPackage(pkg1):
5919                                         # pkg1 > pkg2
5920                                         discard_pkgs.add(pkg2)
5921
5922                 return [pkg.slot_atom for pkg in greedy_pkgs \
5923                         if pkg not in discard_pkgs]
5924
5925         def _select_atoms_from_graph(self, *pargs, **kwargs):
5926                 """
5927                 Prefer atoms matching packages that have already been
5928                 added to the graph or those that are installed and have
5929                 not been scheduled for replacement.
5930                 """
5931                 kwargs["trees"] = self._graph_trees
5932                 return self._select_atoms_highest_available(*pargs, **kwargs)
5933
5934         def _select_atoms_highest_available(self, root, depstring,
5935                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5936                 """This will raise InvalidDependString if necessary. If trees is
5937                 None then self._filtered_trees is used."""
5938                 pkgsettings = self.pkgsettings[root]
5939                 if trees is None:
5940                         trees = self._filtered_trees
5941                 if not getattr(priority, "buildtime", False):
5942                         # The parent should only be passed to dep_check() for buildtime
5943                         # dependencies since that's the only case when it's appropriate
5944                         # to trigger the circular dependency avoidance code which uses it.
5945                         # It's important not to trigger the same circular dependency
5946                         # avoidance code for runtime dependencies since it's not needed
5947                         # and it can promote an incorrect package choice.
5948                         parent = None
5949                 if True:
5950                         try:
5951                                 if parent is not None:
5952                                         trees[root]["parent"] = parent
5953                                 if not strict:
5954                                         portage.dep._dep_check_strict = False
5955                                 mycheck = portage.dep_check(depstring, None,
5956                                         pkgsettings, myuse=myuse,
5957                                         myroot=root, trees=trees)
5958                         finally:
5959                                 if parent is not None:
5960                                         trees[root].pop("parent")
5961                                 portage.dep._dep_check_strict = True
5962                         if not mycheck[0]:
5963                                 raise portage.exception.InvalidDependString(mycheck[1])
5964                         selected_atoms = mycheck[1]
5965                 return selected_atoms
5966
5967         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5968                 atom = portage.dep.Atom(atom)
5969                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5970                 atom_without_use = atom
5971                 if atom.use:
5972                         atom_without_use = portage.dep.remove_slot(atom)
5973                         if atom.slot:
5974                                 atom_without_use += ":" + atom.slot
5975                         atom_without_use = portage.dep.Atom(atom_without_use)
5976                 xinfo = '"%s"' % atom
5977                 if arg:
5978                         xinfo='"%s"' % arg
5979                 # Discard null/ from failed cpv_expand category expansion.
5980                 xinfo = xinfo.replace("null/", "")
5981                 masked_packages = []
5982                 missing_use = []
5983                 masked_pkg_instances = set()
5984                 missing_licenses = []
5985                 have_eapi_mask = False
5986                 pkgsettings = self.pkgsettings[root]
5987                 implicit_iuse = pkgsettings._get_implicit_iuse()
5988                 root_config = self.roots[root]
5989                 portdb = self.roots[root].trees["porttree"].dbapi
5990                 dbs = self._filtered_trees[root]["dbs"]
5991                 for db, pkg_type, built, installed, db_keys in dbs:
5992                         if installed:
5993                                 continue
5994                         match = db.match
5995                         if hasattr(db, "xmatch"):
5996                                 cpv_list = db.xmatch("match-all", atom_without_use)
5997                         else:
5998                                 cpv_list = db.match(atom_without_use)
5999                         # descending order
6000                         cpv_list.reverse()
6001                         for cpv in cpv_list:
6002                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6003                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6004                                 if metadata is not None:
6005                                         pkg = Package(built=built, cpv=cpv,
6006                                                 installed=installed, metadata=metadata,
6007                                                 root_config=root_config)
6008                                         if pkg.cp != atom.cp:
6009                                                 # A cpv can be returned from dbapi.match() as an
6010                                                 # old-style virtual match even in cases when the
6011                                                 # package does not actually PROVIDE the virtual.
6012                                                 # Filter out any such false matches here.
6013                                                 if not atom_set.findAtomForPackage(pkg):
6014                                                         continue
6015                                         if mreasons:
6016                                                 masked_pkg_instances.add(pkg)
6017                                         if atom.use:
6018                                                 missing_use.append(pkg)
6019                                                 if not mreasons:
6020                                                         continue
6021                                 masked_packages.append(
6022                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6023
6024                 missing_use_reasons = []
6025                 missing_iuse_reasons = []
6026                 for pkg in missing_use:
6027                         use = pkg.use.enabled
6028                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6029                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6030                         missing_iuse = []
6031                         for x in atom.use.required:
6032                                 if iuse_re.match(x) is None:
6033                                         missing_iuse.append(x)
6034                         mreasons = []
6035                         if missing_iuse:
6036                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6037                                 missing_iuse_reasons.append((pkg, mreasons))
6038                         else:
6039                                 need_enable = sorted(atom.use.enabled.difference(use))
6040                                 need_disable = sorted(atom.use.disabled.intersection(use))
6041                                 if need_enable or need_disable:
6042                                         changes = []
6043                                         changes.extend(colorize("red", "+" + x) \
6044                                                 for x in need_enable)
6045                                         changes.extend(colorize("blue", "-" + x) \
6046                                                 for x in need_disable)
6047                                         mreasons.append("Change USE: %s" % " ".join(changes))
6048                                         missing_use_reasons.append((pkg, mreasons))
6049
6050                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6051                         in missing_use_reasons if pkg not in masked_pkg_instances]
6052
6053                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6054                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6055
6056                 show_missing_use = False
6057                 if unmasked_use_reasons:
6058                         # Only show the latest version.
6059                         show_missing_use = unmasked_use_reasons[:1]
6060                 elif unmasked_iuse_reasons:
6061                         if missing_use_reasons:
6062                                 # All packages with required IUSE are masked,
6063                                 # so display a normal masking message.
6064                                 pass
6065                         else:
6066                                 show_missing_use = unmasked_iuse_reasons
6067
6068                 if show_missing_use:
6069                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6070                         print "!!! One of the following packages is required to complete your request:"
6071                         for pkg, mreasons in show_missing_use:
6072                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6073
6074                 elif masked_packages:
6075                         print "\n!!! " + \
6076                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6077                                 colorize("INFORM", xinfo) + \
6078                                 colorize("BAD", " have been masked.")
6079                         print "!!! One of the following masked packages is required to complete your request:"
6080                         have_eapi_mask = show_masked_packages(masked_packages)
6081                         if have_eapi_mask:
6082                                 print
6083                                 msg = ("The current version of portage supports " + \
6084                                         "EAPI '%s'. You must upgrade to a newer version" + \
6085                                         " of portage before EAPI masked packages can" + \
6086                                         " be installed.") % portage.const.EAPI
6087                                 from textwrap import wrap
6088                                 for line in wrap(msg, 75):
6089                                         print line
6090                         print
6091                         show_mask_docs()
6092                 else:
6093                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6094
6095                 # Show parent nodes and the argument that pulled them in.
6096                 traversed_nodes = set()
6097                 node = myparent
6098                 msg = []
6099                 while node is not None:
6100                         traversed_nodes.add(node)
6101                         msg.append('(dependency required by "%s" [%s])' % \
6102                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6103                         # When traversing to parents, prefer arguments over packages
6104                         # since arguments are root nodes. Never traverse the same
6105                         # package twice, in order to prevent an infinite loop.
6106                         selected_parent = None
6107                         for parent in self.digraph.parent_nodes(node):
6108                                 if isinstance(parent, DependencyArg):
6109                                         msg.append('(dependency required by "%s" [argument])' % \
6110                                                 (colorize('INFORM', str(parent))))
6111                                         selected_parent = None
6112                                         break
6113                                 if parent not in traversed_nodes:
6114                                         selected_parent = parent
6115                         node = selected_parent
6116                 for line in msg:
6117                         print line
6118
6119                 print
6120
6121         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6122                 cache_key = (root, atom, onlydeps)
6123                 ret = self._highest_pkg_cache.get(cache_key)
6124                 if ret is not None:
6125                         pkg, existing = ret
6126                         if pkg and not existing:
6127                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6128                                 if existing and existing == pkg:
6129                                         # Update the cache to reflect that the
6130                                         # package has been added to the graph.
6131                                         ret = pkg, pkg
6132                                         self._highest_pkg_cache[cache_key] = ret
6133                         return ret
6134                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6135                 self._highest_pkg_cache[cache_key] = ret
6136                 pkg, existing = ret
6137                 if pkg is not None:
6138                         settings = pkg.root_config.settings
6139                         if visible(settings, pkg) and not (pkg.installed and \
6140                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6141                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6142                 return ret
6143
6144         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6145                 root_config = self.roots[root]
6146                 pkgsettings = self.pkgsettings[root]
6147                 dbs = self._filtered_trees[root]["dbs"]
6148                 vardb = self.roots[root].trees["vartree"].dbapi
6149                 portdb = self.roots[root].trees["porttree"].dbapi
6150                 # List of acceptable packages, ordered by type preference.
6151                 matched_packages = []
6152                 highest_version = None
6153                 if not isinstance(atom, portage.dep.Atom):
6154                         atom = portage.dep.Atom(atom)
6155                 atom_cp = atom.cp
6156                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6157                 existing_node = None
6158                 myeb = None
6159                 usepkgonly = "--usepkgonly" in self.myopts
6160                 empty = "empty" in self.myparams
6161                 selective = "selective" in self.myparams
6162                 reinstall = False
6163                 noreplace = "--noreplace" in self.myopts
6164                 # Behavior of the "selective" parameter depends on
6165                 # whether or not a package matches an argument atom.
6166                 # If an installed package provides an old-style
6167                 # virtual that is no longer provided by an available
6168                 # package, the installed package may match an argument
6169                 # atom even though none of the available packages do.
6170                 # Therefore, "selective" logic does not consider
6171                 # whether or not an installed package matches an
6172                 # argument atom. It only considers whether or not
6173                 # available packages match argument atoms, which is
6174                 # represented by the found_available_arg flag.
6175                 found_available_arg = False
6176                 for find_existing_node in True, False:
6177                         if existing_node:
6178                                 break
6179                         for db, pkg_type, built, installed, db_keys in dbs:
6180                                 if existing_node:
6181                                         break
6182                                 if installed and not find_existing_node:
6183                                         want_reinstall = reinstall or empty or \
6184                                                 (found_available_arg and not selective)
6185                                         if want_reinstall and matched_packages:
6186                                                 continue
6187                                 if hasattr(db, "xmatch"):
6188                                         cpv_list = db.xmatch("match-all", atom)
6189                                 else:
6190                                         cpv_list = db.match(atom)
6191
6192                                 # USE=multislot can make an installed package appear as if
6193                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6194                                 # won't do any good as long as USE=multislot is enabled since
6195                                 # the newly built package still won't have the expected slot.
6196                                 # Therefore, assume that such SLOT dependencies are already
6197                                 # satisfied rather than forcing a rebuild.
6198                                 if installed and not cpv_list and atom.slot:
6199                                         for cpv in db.match(atom.cp):
6200                                                 slot_available = False
6201                                                 for other_db, other_type, other_built, \
6202                                                         other_installed, other_keys in dbs:
6203                                                         try:
6204                                                                 if atom.slot == \
6205                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6206                                                                         slot_available = True
6207                                                                         break
6208                                                         except KeyError:
6209                                                                 pass
6210                                                 if not slot_available:
6211                                                         continue
6212                                                 inst_pkg = self._pkg(cpv, "installed",
6213                                                         root_config, installed=installed)
6214                                                 # Remove the slot from the atom and verify that
6215                                                 # the package matches the resulting atom.
6216                                                 atom_without_slot = portage.dep.remove_slot(atom)
6217                                                 if atom.use:
6218                                                         atom_without_slot += str(atom.use)
6219                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6220                                                 if portage.match_from_list(
6221                                                         atom_without_slot, [inst_pkg]):
6222                                                         cpv_list = [inst_pkg.cpv]
6223                                                 break
6224
6225                                 if not cpv_list:
6226                                         continue
6227                                 pkg_status = "merge"
6228                                 if installed or onlydeps:
6229                                         pkg_status = "nomerge"
6230                                 # descending order
6231                                 cpv_list.reverse()
6232                                 for cpv in cpv_list:
6233                                         # Make --noreplace take precedence over --newuse.
6234                                         if not installed and noreplace and \
6235                                                 cpv in vardb.match(atom):
6236                                                 # If the installed version is masked, it may
6237                                                 # be necessary to look at lower versions,
6238                                                 # in case there is a visible downgrade.
6239                                                 continue
6240                                         reinstall_for_flags = None
6241                                         cache_key = (pkg_type, root, cpv, pkg_status)
6242                                         calculated_use = True
6243                                         pkg = self._pkg_cache.get(cache_key)
6244                                         if pkg is None:
6245                                                 calculated_use = False
6246                                                 try:
6247                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6248                                                 except KeyError:
6249                                                         continue
6250                                                 pkg = Package(built=built, cpv=cpv,
6251                                                         installed=installed, metadata=metadata,
6252                                                         onlydeps=onlydeps, root_config=root_config,
6253                                                         type_name=pkg_type)
6254                                                 metadata = pkg.metadata
6255                                                 if not built:
6256                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6257                                                 if not built and ("?" in metadata["LICENSE"] or \
6258                                                         "?" in metadata["PROVIDE"]):
6259                                                         # This is avoided whenever possible because
6260                                                         # it's expensive. It only needs to be done here
6261                                                         # if it has an effect on visibility.
6262                                                         pkgsettings.setcpv(pkg)
6263                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6264                                                         calculated_use = True
6265                                                 self._pkg_cache[pkg] = pkg
6266
6267                                         if not installed or (built and matched_packages):
6268                                                 # Only enforce visibility on installed packages
6269                                                 # if there is at least one other visible package
6270                                                 # available. By filtering installed masked packages
6271                                                 # here, packages that have been masked since they
6272                                                 # were installed can be automatically downgraded
6273                                                 # to an unmasked version.
6274                                                 try:
6275                                                         if not visible(pkgsettings, pkg):
6276                                                                 continue
6277                                                 except portage.exception.InvalidDependString:
6278                                                         if not installed:
6279                                                                 continue
6280
6281                                                 # Enable upgrade or downgrade to a version
6282                                                 # with visible KEYWORDS when the installed
6283                                                 # version is masked by KEYWORDS, but never
6284                                                 # reinstall the same exact version only due
6285                                                 # to a KEYWORDS mask.
6286                                                 if built and matched_packages:
6287
6288                                                         different_version = None
6289                                                         for avail_pkg in matched_packages:
6290                                                                 if not portage.dep.cpvequal(
6291                                                                         pkg.cpv, avail_pkg.cpv):
6292                                                                         different_version = avail_pkg
6293                                                                         break
6294                                                         if different_version is not None:
6295
6296                                                                 if installed and \
6297                                                                         pkgsettings._getMissingKeywords(
6298                                                                         pkg.cpv, pkg.metadata):
6299                                                                         continue
6300
6301                                                                 # If the ebuild no longer exists or it's
6302                                                                 # keywords have been dropped, reject built
6303                                                                 # instances (installed or binary).
6304                                                                 # If --usepkgonly is enabled, assume that
6305                                                                 # the ebuild status should be ignored.
6306                                                                 if not usepkgonly:
6307                                                                         try:
6308                                                                                 pkg_eb = self._pkg(
6309                                                                                         pkg.cpv, "ebuild", root_config)
6310                                                                         except portage.exception.PackageNotFound:
6311                                                                                 continue
6312                                                                         else:
6313                                                                                 if not visible(pkgsettings, pkg_eb):
6314                                                                                         continue
6315
6316                                         if not pkg.built and not calculated_use:
6317                                                 # This is avoided whenever possible because
6318                                                 # it's expensive.
6319                                                 pkgsettings.setcpv(pkg)
6320                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6321
6322                                         if pkg.cp != atom.cp:
6323                                                 # A cpv can be returned from dbapi.match() as an
6324                                                 # old-style virtual match even in cases when the
6325                                                 # package does not actually PROVIDE the virtual.
6326                                                 # Filter out any such false matches here.
6327                                                 if not atom_set.findAtomForPackage(pkg):
6328                                                         continue
6329
6330                                         myarg = None
6331                                         if root == self.target_root:
6332                                                 try:
6333                                                         # Ebuild USE must have been calculated prior
6334                                                         # to this point, in case atoms have USE deps.
6335                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6336                                                 except StopIteration:
6337                                                         pass
6338                                                 except portage.exception.InvalidDependString:
6339                                                         if not installed:
6340                                                                 # masked by corruption
6341                                                                 continue
6342                                         if not installed and myarg:
6343                                                 found_available_arg = True
6344
6345                                         if atom.use and not pkg.built:
6346                                                 use = pkg.use.enabled
6347                                                 if atom.use.enabled.difference(use):
6348                                                         continue
6349                                                 if atom.use.disabled.intersection(use):
6350                                                         continue
6351                                         if pkg.cp == atom_cp:
6352                                                 if highest_version is None:
6353                                                         highest_version = pkg
6354                                                 elif pkg > highest_version:
6355                                                         highest_version = pkg
6356                                         # At this point, we've found the highest visible
6357                                         # match from the current repo. Any lower versions
6358                                         # from this repo are ignored, so this so the loop
6359                                         # will always end with a break statement below
6360                                         # this point.
6361                                         if find_existing_node:
6362                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6363                                                 if not e_pkg:
6364                                                         break
6365                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6366                                                         if highest_version and \
6367                                                                 e_pkg.cp == atom_cp and \
6368                                                                 e_pkg < highest_version and \
6369                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6370                                                                 # There is a higher version available in a
6371                                                                 # different slot, so this existing node is
6372                                                                 # irrelevant.
6373                                                                 pass
6374                                                         else:
6375                                                                 matched_packages.append(e_pkg)
6376                                                                 existing_node = e_pkg
6377                                                 break
6378                                         # Compare built package to current config and
6379                                         # reject the built package if necessary.
6380                                         if built and not installed and \
6381                                                 ("--newuse" in self.myopts or \
6382                                                 "--reinstall" in self.myopts):
6383                                                 iuses = pkg.iuse.all
6384                                                 old_use = pkg.use.enabled
6385                                                 if myeb:
6386                                                         pkgsettings.setcpv(myeb)
6387                                                 else:
6388                                                         pkgsettings.setcpv(pkg)
6389                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6390                                                 forced_flags = set()
6391                                                 forced_flags.update(pkgsettings.useforce)
6392                                                 forced_flags.update(pkgsettings.usemask)
6393                                                 cur_iuse = iuses
6394                                                 if myeb and not usepkgonly:
6395                                                         cur_iuse = myeb.iuse.all
6396                                                 if self._reinstall_for_flags(forced_flags,
6397                                                         old_use, iuses,
6398                                                         now_use, cur_iuse):
6399                                                         break
6400                                         # Compare current config to installed package
6401                                         # and do not reinstall if possible.
6402                                         if not installed and \
6403                                                 ("--newuse" in self.myopts or \
6404                                                 "--reinstall" in self.myopts) and \
6405                                                 cpv in vardb.match(atom):
6406                                                 pkgsettings.setcpv(pkg)
6407                                                 forced_flags = set()
6408                                                 forced_flags.update(pkgsettings.useforce)
6409                                                 forced_flags.update(pkgsettings.usemask)
6410                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6411                                                 old_iuse = set(filter_iuse_defaults(
6412                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6413                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6414                                                 cur_iuse = pkg.iuse.all
6415                                                 reinstall_for_flags = \
6416                                                         self._reinstall_for_flags(
6417                                                         forced_flags, old_use, old_iuse,
6418                                                         cur_use, cur_iuse)
6419                                                 if reinstall_for_flags:
6420                                                         reinstall = True
6421                                         if not built:
6422                                                 myeb = pkg
6423                                         matched_packages.append(pkg)
6424                                         if reinstall_for_flags:
6425                                                 self._reinstall_nodes[pkg] = \
6426                                                         reinstall_for_flags
6427                                         break
6428
6429                 if not matched_packages:
6430                         return None, None
6431
6432                 if "--debug" in self.myopts:
6433                         for pkg in matched_packages:
6434                                 portage.writemsg("%s %s\n" % \
6435                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6436
6437                 # Filter out any old-style virtual matches if they are
6438                 # mixed with new-style virtual matches.
6439                 cp = portage.dep_getkey(atom)
6440                 if len(matched_packages) > 1 and \
6441                         "virtual" == portage.catsplit(cp)[0]:
6442                         for pkg in matched_packages:
6443                                 if pkg.cp != cp:
6444                                         continue
6445                                 # Got a new-style virtual, so filter
6446                                 # out any old-style virtuals.
6447                                 matched_packages = [pkg for pkg in matched_packages \
6448                                         if pkg.cp == cp]
6449                                 break
6450
6451                 if len(matched_packages) > 1:
6452                         bestmatch = portage.best(
6453                                 [pkg.cpv for pkg in matched_packages])
6454                         matched_packages = [pkg for pkg in matched_packages \
6455                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6456
6457                 # ordered by type preference ("ebuild" type is the last resort)
6458                 return  matched_packages[-1], existing_node
6459
6460         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6461                 """
6462                 Select packages that have already been added to the graph or
6463                 those that are installed and have not been scheduled for
6464                 replacement.
6465                 """
6466                 graph_db = self._graph_trees[root]["porttree"].dbapi
6467                 matches = graph_db.match_pkgs(atom)
6468                 if not matches:
6469                         return None, None
6470                 pkg = matches[-1] # highest match
6471                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6472                 return pkg, in_graph
6473
6474         def _complete_graph(self):
6475                 """
6476                 Add any deep dependencies of required sets (args, system, world) that
6477                 have not been pulled into the graph yet. This ensures that the graph
6478                 is consistent such that initially satisfied deep dependencies are not
6479                 broken in the new graph. Initially unsatisfied dependencies are
6480                 irrelevant since we only want to avoid breaking dependencies that are
6481                 intially satisfied.
6482
6483                 Since this method can consume enough time to disturb users, it is
6484                 currently only enabled by the --complete-graph option.
6485                 """
6486                 if "--buildpkgonly" in self.myopts or \
6487                         "recurse" not in self.myparams:
6488                         return 1
6489
6490                 if "complete" not in self.myparams:
6491                         # Skip this to avoid consuming enough time to disturb users.
6492                         return 1
6493
6494                 # Put the depgraph into a mode that causes it to only
6495                 # select packages that have already been added to the
6496                 # graph or those that are installed and have not been
6497                 # scheduled for replacement. Also, toggle the "deep"
6498                 # parameter so that all dependencies are traversed and
6499                 # accounted for.
6500                 self._select_atoms = self._select_atoms_from_graph
6501                 self._select_package = self._select_pkg_from_graph
6502                 already_deep = "deep" in self.myparams
6503                 if not already_deep:
6504                         self.myparams.add("deep")
6505
6506                 for root in self.roots:
6507                         required_set_names = self._required_set_names.copy()
6508                         if root == self.target_root and \
6509                                 (already_deep or "empty" in self.myparams):
6510                                 required_set_names.difference_update(self._sets)
6511                         if not required_set_names and not self._ignored_deps:
6512                                 continue
6513                         root_config = self.roots[root]
6514                         setconfig = root_config.setconfig
6515                         args = []
6516                         # Reuse existing SetArg instances when available.
6517                         for arg in self.digraph.root_nodes():
6518                                 if not isinstance(arg, SetArg):
6519                                         continue
6520                                 if arg.root_config != root_config:
6521                                         continue
6522                                 if arg.name in required_set_names:
6523                                         args.append(arg)
6524                                         required_set_names.remove(arg.name)
6525                         # Create new SetArg instances only when necessary.
6526                         for s in required_set_names:
6527                                 expanded_set = InternalPackageSet(
6528                                         initial_atoms=setconfig.getSetAtoms(s))
6529                                 atom = SETPREFIX + s
6530                                 args.append(SetArg(arg=atom, set=expanded_set,
6531                                         root_config=root_config))
6532                         vardb = root_config.trees["vartree"].dbapi
6533                         for arg in args:
6534                                 for atom in arg.set:
6535                                         self._dep_stack.append(
6536                                                 Dependency(atom=atom, root=root, parent=arg))
6537                         if self._ignored_deps:
6538                                 self._dep_stack.extend(self._ignored_deps)
6539                                 self._ignored_deps = []
6540                         if not self._create_graph(allow_unsatisfied=True):
6541                                 return 0
6542                         # Check the unsatisfied deps to see if any initially satisfied deps
6543                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6544                         # deps are irrelevant since we only want to avoid breaking deps
6545                         # that are initially satisfied.
6546                         while self._unsatisfied_deps:
6547                                 dep = self._unsatisfied_deps.pop()
6548                                 matches = vardb.match_pkgs(dep.atom)
6549                                 if not matches:
6550                                         self._initially_unsatisfied_deps.append(dep)
6551                                         continue
6552                                 # An scheduled installation broke a deep dependency.
6553                                 # Add the installed package to the graph so that it
6554                                 # will be appropriately reported as a slot collision
6555                                 # (possibly solvable via backtracking).
6556                                 pkg = matches[-1] # highest match
6557                                 if not self._add_pkg(pkg, dep):
6558                                         return 0
6559                                 if not self._create_graph(allow_unsatisfied=True):
6560                                         return 0
6561                 return 1
6562
6563         def _pkg(self, cpv, type_name, root_config, installed=False):
6564                 """
6565                 Get a package instance from the cache, or create a new
6566                 one if necessary. Raises KeyError from aux_get if it
6567                 failures for some reason (package does not exist or is
6568                 corrupt).
6569                 """
6570                 operation = "merge"
6571                 if installed:
6572                         operation = "nomerge"
6573                 pkg = self._pkg_cache.get(
6574                         (type_name, root_config.root, cpv, operation))
6575                 if pkg is None:
6576                         tree_type = self.pkg_tree_map[type_name]
6577                         db = root_config.trees[tree_type].dbapi
6578                         db_keys = list(self._trees_orig[root_config.root][
6579                                 tree_type].dbapi._aux_cache_keys)
6580                         try:
6581                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6582                         except KeyError:
6583                                 raise portage.exception.PackageNotFound(cpv)
6584                         pkg = Package(cpv=cpv, metadata=metadata,
6585                                 root_config=root_config, installed=installed)
6586                         if type_name == "ebuild":
6587                                 settings = self.pkgsettings[root_config.root]
6588                                 settings.setcpv(pkg)
6589                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6590                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6591                         self._pkg_cache[pkg] = pkg
6592                 return pkg
6593
6594         def validate_blockers(self):
6595                 """Remove any blockers from the digraph that do not match any of the
6596                 packages within the graph.  If necessary, create hard deps to ensure
6597                 correct merge order such that mutually blocking packages are never
6598                 installed simultaneously."""
6599
6600                 if "--buildpkgonly" in self.myopts or \
6601                         "--nodeps" in self.myopts:
6602                         return True
6603
6604                 #if "deep" in self.myparams:
6605                 if True:
6606                         # Pull in blockers from all installed packages that haven't already
6607                         # been pulled into the depgraph.  This is not enabled by default
6608                         # due to the performance penalty that is incurred by all the
6609                         # additional dep_check calls that are required.
6610
6611                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6612                         for myroot in self.trees:
6613                                 vardb = self.trees[myroot]["vartree"].dbapi
6614                                 portdb = self.trees[myroot]["porttree"].dbapi
6615                                 pkgsettings = self.pkgsettings[myroot]
6616                                 final_db = self.mydbapi[myroot]
6617
6618                                 blocker_cache = BlockerCache(myroot, vardb)
6619                                 stale_cache = set(blocker_cache)
6620                                 for pkg in vardb:
6621                                         cpv = pkg.cpv
6622                                         stale_cache.discard(cpv)
6623                                         pkg_in_graph = self.digraph.contains(pkg)
6624
6625                                         # Check for masked installed packages. Only warn about
6626                                         # packages that are in the graph in order to avoid warning
6627                                         # about those that will be automatically uninstalled during
6628                                         # the merge process or by --depclean.
6629                                         if pkg in final_db:
6630                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6631                                                         self._masked_installed.add(pkg)
6632
6633                                         blocker_atoms = None
6634                                         blockers = None
6635                                         if pkg_in_graph:
6636                                                 blockers = []
6637                                                 try:
6638                                                         blockers.extend(
6639                                                                 self._blocker_parents.child_nodes(pkg))
6640                                                 except KeyError:
6641                                                         pass
6642                                                 try:
6643                                                         blockers.extend(
6644                                                                 self._irrelevant_blockers.child_nodes(pkg))
6645                                                 except KeyError:
6646                                                         pass
6647                                         if blockers is not None:
6648                                                 blockers = set(str(blocker.atom) \
6649                                                         for blocker in blockers)
6650
6651                                         # If this node has any blockers, create a "nomerge"
6652                                         # node for it so that they can be enforced.
6653                                         self.spinner.update()
6654                                         blocker_data = blocker_cache.get(cpv)
6655                                         if blocker_data is not None and \
6656                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6657                                                 blocker_data = None
6658
6659                                         # If blocker data from the graph is available, use
6660                                         # it to validate the cache and update the cache if
6661                                         # it seems invalid.
6662                                         if blocker_data is not None and \
6663                                                 blockers is not None:
6664                                                 if not blockers.symmetric_difference(
6665                                                         blocker_data.atoms):
6666                                                         continue
6667                                                 blocker_data = None
6668
6669                                         if blocker_data is None and \
6670                                                 blockers is not None:
6671                                                 # Re-use the blockers from the graph.
6672                                                 blocker_atoms = sorted(blockers)
6673                                                 counter = long(pkg.metadata["COUNTER"])
6674                                                 blocker_data = \
6675                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6676                                                 blocker_cache[pkg.cpv] = blocker_data
6677                                                 continue
6678
6679                                         if blocker_data:
6680                                                 blocker_atoms = blocker_data.atoms
6681                                         else:
6682                                                 # Use aux_get() to trigger FakeVartree global
6683                                                 # updates on *DEPEND when appropriate.
6684                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6685                                                 # It is crucial to pass in final_db here in order to
6686                                                 # optimize dep_check calls by eliminating atoms via
6687                                                 # dep_wordreduce and dep_eval calls.
6688                                                 try:
6689                                                         portage.dep._dep_check_strict = False
6690                                                         try:
6691                                                                 success, atoms = portage.dep_check(depstr,
6692                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6693                                                                         trees=self._graph_trees, myroot=myroot)
6694                                                         except Exception, e:
6695                                                                 if isinstance(e, SystemExit):
6696                                                                         raise
6697                                                                 # This is helpful, for example, if a ValueError
6698                                                                 # is thrown from cpv_expand due to multiple
6699                                                                 # matches (this can happen if an atom lacks a
6700                                                                 # category).
6701                                                                 show_invalid_depstring_notice(
6702                                                                         pkg, depstr, str(e))
6703                                                                 del e
6704                                                                 raise
6705                                                 finally:
6706                                                         portage.dep._dep_check_strict = True
6707                                                 if not success:
6708                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6709                                                         if replacement_pkg and \
6710                                                                 replacement_pkg[0].operation == "merge":
6711                                                                 # This package is being replaced anyway, so
6712                                                                 # ignore invalid dependencies so as not to
6713                                                                 # annoy the user too much (otherwise they'd be
6714                                                                 # forced to manually unmerge it first).
6715                                                                 continue
6716                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6717                                                         return False
6718                                                 blocker_atoms = [myatom for myatom in atoms \
6719                                                         if myatom.startswith("!")]
6720                                                 blocker_atoms.sort()
6721                                                 counter = long(pkg.metadata["COUNTER"])
6722                                                 blocker_cache[cpv] = \
6723                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6724                                         if blocker_atoms:
6725                                                 try:
6726                                                         for atom in blocker_atoms:
6727                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6728                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6729                                                                 self._blocker_parents.add(blocker, pkg)
6730                                                 except portage.exception.InvalidAtom, e:
6731                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6732                                                         show_invalid_depstring_notice(
6733                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6734                                                         return False
6735                                 for cpv in stale_cache:
6736                                         del blocker_cache[cpv]
6737                                 blocker_cache.flush()
6738                                 del blocker_cache
6739
6740                 # Discard any "uninstall" tasks scheduled by previous calls
6741                 # to this method, since those tasks may not make sense given
6742                 # the current graph state.
6743                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6744                 if previous_uninstall_tasks:
6745                         self._blocker_uninstalls = digraph()
6746                         self.digraph.difference_update(previous_uninstall_tasks)
6747
6748                 for blocker in self._blocker_parents.leaf_nodes():
6749                         self.spinner.update()
6750                         root_config = self.roots[blocker.root]
6751                         virtuals = root_config.settings.getvirtuals()
6752                         myroot = blocker.root
6753                         initial_db = self.trees[myroot]["vartree"].dbapi
6754                         final_db = self.mydbapi[myroot]
6755                         
6756                         provider_virtual = False
6757                         if blocker.cp in virtuals and \
6758                                 not self._have_new_virt(blocker.root, blocker.cp):
6759                                 provider_virtual = True
6760
6761                         if provider_virtual:
6762                                 atoms = []
6763                                 for provider_entry in virtuals[blocker.cp]:
6764                                         provider_cp = \
6765                                                 portage.dep_getkey(provider_entry)
6766                                         atoms.append(blocker.atom.replace(
6767                                                 blocker.cp, provider_cp))
6768                         else:
6769                                 atoms = [blocker.atom]
6770
6771                         blocked_initial = []
6772                         for atom in atoms:
6773                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6774
6775                         blocked_final = []
6776                         for atom in atoms:
6777                                 blocked_final.extend(final_db.match_pkgs(atom))
6778
6779                         if not blocked_initial and not blocked_final:
6780                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6781                                 self._blocker_parents.remove(blocker)
6782                                 # Discard any parents that don't have any more blockers.
6783                                 for pkg in parent_pkgs:
6784                                         self._irrelevant_blockers.add(blocker, pkg)
6785                                         if not self._blocker_parents.child_nodes(pkg):
6786                                                 self._blocker_parents.remove(pkg)
6787                                 continue
6788                         for parent in self._blocker_parents.parent_nodes(blocker):
6789                                 unresolved_blocks = False
6790                                 depends_on_order = set()
6791                                 for pkg in blocked_initial:
6792                                         if pkg.slot_atom == parent.slot_atom:
6793                                                 # TODO: Support blocks within slots in cases where it
6794                                                 # might make sense.  For example, a new version might
6795                                                 # require that the old version be uninstalled at build
6796                                                 # time.
6797                                                 continue
6798                                         if parent.installed:
6799                                                 # Two currently installed packages conflict with
6800                                                 # eachother. Ignore this case since the damage
6801                                                 # is already done and this would be likely to
6802                                                 # confuse users if displayed like a normal blocker.
6803                                                 continue
6804
6805                                         self._blocked_pkgs.add(pkg, blocker)
6806
6807                                         if parent.operation == "merge":
6808                                                 # Maybe the blocked package can be replaced or simply
6809                                                 # unmerged to resolve this block.
6810                                                 depends_on_order.add((pkg, parent))
6811                                                 continue
6812                                         # None of the above blocker resolutions techniques apply,
6813                                         # so apparently this one is unresolvable.
6814                                         unresolved_blocks = True
6815                                 for pkg in blocked_final:
6816                                         if pkg.slot_atom == parent.slot_atom:
6817                                                 # TODO: Support blocks within slots.
6818                                                 continue
6819                                         if parent.operation == "nomerge" and \
6820                                                 pkg.operation == "nomerge":
6821                                                 # This blocker will be handled the next time that a
6822                                                 # merge of either package is triggered.
6823                                                 continue
6824
6825                                         self._blocked_pkgs.add(pkg, blocker)
6826
6827                                         # Maybe the blocking package can be
6828                                         # unmerged to resolve this block.
6829                                         if parent.operation == "merge" and pkg.installed:
6830                                                 depends_on_order.add((pkg, parent))
6831                                                 continue
6832                                         elif parent.operation == "nomerge":
6833                                                 depends_on_order.add((parent, pkg))
6834                                                 continue
6835                                         # None of the above blocker resolutions techniques apply,
6836                                         # so apparently this one is unresolvable.
6837                                         unresolved_blocks = True
6838
6839                                 # Make sure we don't unmerge any package that have been pulled
6840                                 # into the graph.
6841                                 if not unresolved_blocks and depends_on_order:
6842                                         for inst_pkg, inst_task in depends_on_order:
6843                                                 if self.digraph.contains(inst_pkg) and \
6844                                                         self.digraph.parent_nodes(inst_pkg):
6845                                                         unresolved_blocks = True
6846                                                         break
6847
6848                                 if not unresolved_blocks and depends_on_order:
6849                                         for inst_pkg, inst_task in depends_on_order:
6850                                                 uninst_task = Package(built=inst_pkg.built,
6851                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6852                                                         metadata=inst_pkg.metadata,
6853                                                         operation="uninstall",
6854                                                         root_config=inst_pkg.root_config,
6855                                                         type_name=inst_pkg.type_name)
6856                                                 self._pkg_cache[uninst_task] = uninst_task
6857                                                 # Enforce correct merge order with a hard dep.
6858                                                 self.digraph.addnode(uninst_task, inst_task,
6859                                                         priority=BlockerDepPriority.instance)
6860                                                 # Count references to this blocker so that it can be
6861                                                 # invalidated after nodes referencing it have been
6862                                                 # merged.
6863                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6864                                 if not unresolved_blocks and not depends_on_order:
6865                                         self._irrelevant_blockers.add(blocker, parent)
6866                                         self._blocker_parents.remove_edge(blocker, parent)
6867                                         if not self._blocker_parents.parent_nodes(blocker):
6868                                                 self._blocker_parents.remove(blocker)
6869                                         if not self._blocker_parents.child_nodes(parent):
6870                                                 self._blocker_parents.remove(parent)
6871                                 if unresolved_blocks:
6872                                         self._unsolvable_blockers.add(blocker, parent)
6873
6874                 return True
6875
6876         def _accept_blocker_conflicts(self):
6877                 acceptable = False
6878                 for x in ("--buildpkgonly", "--fetchonly",
6879                         "--fetch-all-uri", "--nodeps"):
6880                         if x in self.myopts:
6881                                 acceptable = True
6882                                 break
6883                 return acceptable
6884
6885         def _merge_order_bias(self, mygraph):
6886                 """
6887                 For optimal leaf node selection, promote deep system runtime deps and
6888                 order nodes from highest to lowest overall reference count.
6889                 """
6890
6891                 node_info = {}
6892                 for node in mygraph.order:
6893                         node_info[node] = len(mygraph.parent_nodes(node))
6894                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6895
6896                 def cmp_merge_preference(node1, node2):
6897
6898                         if node1.operation == 'uninstall':
6899                                 if node2.operation == 'uninstall':
6900                                         return 0
6901                                 return 1
6902
6903                         if node2.operation == 'uninstall':
6904                                 if node1.operation == 'uninstall':
6905                                         return 0
6906                                 return -1
6907
6908                         node1_sys = node1 in deep_system_deps
6909                         node2_sys = node2 in deep_system_deps
6910                         if node1_sys != node2_sys:
6911                                 if node1_sys:
6912                                         return -1
6913                                 return 1
6914
6915                         return node_info[node2] - node_info[node1]
6916
6917                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6918
6919         def altlist(self, reversed=False):
6920
6921                 while self._serialized_tasks_cache is None:
6922                         self._resolve_conflicts()
6923                         try:
6924                                 self._serialized_tasks_cache, self._scheduler_graph = \
6925                                         self._serialize_tasks()
6926                         except self._serialize_tasks_retry:
6927                                 pass
6928
6929                 retlist = self._serialized_tasks_cache[:]
6930                 if reversed:
6931                         retlist.reverse()
6932                 return retlist
6933
6934         def schedulerGraph(self):
6935                 """
6936                 The scheduler graph is identical to the normal one except that
6937                 uninstall edges are reversed in specific cases that require
6938                 conflicting packages to be temporarily installed simultaneously.
6939                 This is intended for use by the Scheduler in it's parallelization
6940                 logic. It ensures that temporary simultaneous installation of
6941                 conflicting packages is avoided when appropriate (especially for
6942                 !!atom blockers), but allowed in specific cases that require it.
6943
6944                 Note that this method calls break_refs() which alters the state of
6945                 internal Package instances such that this depgraph instance should
6946                 not be used to perform any more calculations.
6947                 """
6948                 if self._scheduler_graph is None:
6949                         self.altlist()
6950                 self.break_refs(self._scheduler_graph.order)
6951                 return self._scheduler_graph
6952
6953         def break_refs(self, nodes):
6954                 """
6955                 Take a mergelist like that returned from self.altlist() and
6956                 break any references that lead back to the depgraph. This is
6957                 useful if you want to hold references to packages without
6958                 also holding the depgraph on the heap.
6959                 """
6960                 for node in nodes:
6961                         if hasattr(node, "root_config"):
6962                                 # The FakeVartree references the _package_cache which
6963                                 # references the depgraph. So that Package instances don't
6964                                 # hold the depgraph and FakeVartree on the heap, replace
6965                                 # the RootConfig that references the FakeVartree with the
6966                                 # original RootConfig instance which references the actual
6967                                 # vartree.
6968                                 node.root_config = \
6969                                         self._trees_orig[node.root_config.root]["root_config"]
6970
6971         def _resolve_conflicts(self):
6972                 if not self._complete_graph():
6973                         raise self._unknown_internal_error()
6974
6975                 if not self.validate_blockers():
6976                         raise self._unknown_internal_error()
6977
6978                 if self._slot_collision_info:
6979                         self._process_slot_conflicts()
6980
6981         def _serialize_tasks(self):
6982
6983                 if "--debug" in self.myopts:
6984                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6985                         self.digraph.debug_print()
6986                         writemsg("\n", noiselevel=-1)
6987
6988                 scheduler_graph = self.digraph.copy()
6989                 mygraph=self.digraph.copy()
6990                 # Prune "nomerge" root nodes if nothing depends on them, since
6991                 # otherwise they slow down merge order calculation. Don't remove
6992                 # non-root nodes since they help optimize merge order in some cases
6993                 # such as revdep-rebuild.
6994                 removed_nodes = set()
6995                 while True:
6996                         for node in mygraph.root_nodes():
6997                                 if not isinstance(node, Package) or \
6998                                         node.installed or node.onlydeps:
6999                                         removed_nodes.add(node)
7000                         if removed_nodes:
7001                                 self.spinner.update()
7002                                 mygraph.difference_update(removed_nodes)
7003                         if not removed_nodes:
7004                                 break
7005                         removed_nodes.clear()
7006                 self._merge_order_bias(mygraph)
7007                 def cmp_circular_bias(n1, n2):
7008                         """
7009                         RDEPEND is stronger than PDEPEND and this function
7010                         measures such a strength bias within a circular
7011                         dependency relationship.
7012                         """
7013                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7014                                 ignore_priority=priority_range.ignore_medium_soft)
7015                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7016                                 ignore_priority=priority_range.ignore_medium_soft)
7017                         if n1_n2_medium == n2_n1_medium:
7018                                 return 0
7019                         elif n1_n2_medium:
7020                                 return 1
7021                         return -1
7022                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7023                 retlist=[]
7024                 # Contains uninstall tasks that have been scheduled to
7025                 # occur after overlapping blockers have been installed.
7026                 scheduled_uninstalls = set()
7027                 # Contains any Uninstall tasks that have been ignored
7028                 # in order to avoid the circular deps code path. These
7029                 # correspond to blocker conflicts that could not be
7030                 # resolved.
7031                 ignored_uninstall_tasks = set()
7032                 have_uninstall_task = False
7033                 complete = "complete" in self.myparams
7034                 asap_nodes = []
7035
7036                 def get_nodes(**kwargs):
7037                         """
7038                         Returns leaf nodes excluding Uninstall instances
7039                         since those should be executed as late as possible.
7040                         """
7041                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7042                                 if isinstance(node, Package) and \
7043                                         (node.operation != "uninstall" or \
7044                                         node in scheduled_uninstalls)]
7045
7046                 # sys-apps/portage needs special treatment if ROOT="/"
7047                 running_root = self._running_root.root
7048                 from portage.const import PORTAGE_PACKAGE_ATOM
7049                 runtime_deps = InternalPackageSet(
7050                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7051                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7052                         PORTAGE_PACKAGE_ATOM)
7053                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7054                         PORTAGE_PACKAGE_ATOM)
7055
7056                 if running_portage:
7057                         running_portage = running_portage[0]
7058                 else:
7059                         running_portage = None
7060
7061                 if replacement_portage:
7062                         replacement_portage = replacement_portage[0]
7063                 else:
7064                         replacement_portage = None
7065
7066                 if replacement_portage == running_portage:
7067                         replacement_portage = None
7068
7069                 if replacement_portage is not None:
7070                         # update from running_portage to replacement_portage asap
7071                         asap_nodes.append(replacement_portage)
7072
7073                 if running_portage is not None:
7074                         try:
7075                                 portage_rdepend = self._select_atoms_highest_available(
7076                                         running_root, running_portage.metadata["RDEPEND"],
7077                                         myuse=running_portage.use.enabled,
7078                                         parent=running_portage, strict=False)
7079                         except portage.exception.InvalidDependString, e:
7080                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7081                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7082                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7083                                 del e
7084                                 portage_rdepend = []
7085                         runtime_deps.update(atom for atom in portage_rdepend \
7086                                 if not atom.startswith("!"))
7087
7088                 def gather_deps(ignore_priority, mergeable_nodes,
7089                         selected_nodes, node):
7090                         """
7091                         Recursively gather a group of nodes that RDEPEND on
7092                         eachother. This ensures that they are merged as a group
7093                         and get their RDEPENDs satisfied as soon as possible.
7094                         """
7095                         if node in selected_nodes:
7096                                 return True
7097                         if node not in mergeable_nodes:
7098                                 return False
7099                         if node == replacement_portage and \
7100                                 mygraph.child_nodes(node,
7101                                 ignore_priority=priority_range.ignore_medium_soft):
7102                                 # Make sure that portage always has all of it's
7103                                 # RDEPENDs installed first.
7104                                 return False
7105                         selected_nodes.add(node)
7106                         for child in mygraph.child_nodes(node,
7107                                 ignore_priority=ignore_priority):
7108                                 if not gather_deps(ignore_priority,
7109                                         mergeable_nodes, selected_nodes, child):
7110                                         return False
7111                         return True
7112
7113                 def ignore_uninst_or_med(priority):
7114                         if priority is BlockerDepPriority.instance:
7115                                 return True
7116                         return priority_range.ignore_medium(priority)
7117
7118                 def ignore_uninst_or_med_soft(priority):
7119                         if priority is BlockerDepPriority.instance:
7120                                 return True
7121                         return priority_range.ignore_medium_soft(priority)
7122
7123                 tree_mode = "--tree" in self.myopts
7124                 # Tracks whether or not the current iteration should prefer asap_nodes
7125                 # if available.  This is set to False when the previous iteration
7126                 # failed to select any nodes.  It is reset whenever nodes are
7127                 # successfully selected.
7128                 prefer_asap = True
7129
7130                 # Controls whether or not the current iteration should drop edges that
7131                 # are "satisfied" by installed packages, in order to solve circular
7132                 # dependencies. The deep runtime dependencies of installed packages are
7133                 # not checked in this case (bug #199856), so it must be avoided
7134                 # whenever possible.
7135                 drop_satisfied = False
7136
7137                 # State of variables for successive iterations that loosen the
7138                 # criteria for node selection.
7139                 #
7140                 # iteration   prefer_asap   drop_satisfied
7141                 # 1           True          False
7142                 # 2           False         False
7143                 # 3           False         True
7144                 #
7145                 # If no nodes are selected on the last iteration, it is due to
7146                 # unresolved blockers or circular dependencies.
7147
7148                 while not mygraph.empty():
7149                         self.spinner.update()
7150                         selected_nodes = None
7151                         ignore_priority = None
7152                         if drop_satisfied or (prefer_asap and asap_nodes):
7153                                 priority_range = DepPrioritySatisfiedRange
7154                         else:
7155                                 priority_range = DepPriorityNormalRange
7156                         if prefer_asap and asap_nodes:
7157                                 # ASAP nodes are merged before their soft deps. Go ahead and
7158                                 # select root nodes here if necessary, since it's typical for
7159                                 # the parent to have been removed from the graph already.
7160                                 asap_nodes = [node for node in asap_nodes \
7161                                         if mygraph.contains(node)]
7162                                 for node in asap_nodes:
7163                                         if not mygraph.child_nodes(node,
7164                                                 ignore_priority=priority_range.ignore_soft):
7165                                                 selected_nodes = [node]
7166                                                 asap_nodes.remove(node)
7167                                                 break
7168                         if not selected_nodes and \
7169                                 not (prefer_asap and asap_nodes):
7170                                 for i in xrange(priority_range.NONE,
7171                                         priority_range.MEDIUM_SOFT + 1):
7172                                         ignore_priority = priority_range.ignore_priority[i]
7173                                         nodes = get_nodes(ignore_priority=ignore_priority)
7174                                         if nodes:
7175                                                 # If there is a mix of uninstall nodes with other
7176                                                 # types, save the uninstall nodes for later since
7177                                                 # sometimes a merge node will render an uninstall
7178                                                 # node unnecessary (due to occupying the same slot),
7179                                                 # and we want to avoid executing a separate uninstall
7180                                                 # task in that case.
7181                                                 if len(nodes) > 1:
7182                                                         good_uninstalls = []
7183                                                         with_some_uninstalls_excluded = []
7184                                                         for node in nodes:
7185                                                                 if node.operation == "uninstall":
7186                                                                         slot_node = self.mydbapi[node.root
7187                                                                                 ].match_pkgs(node.slot_atom)
7188                                                                         if slot_node and \
7189                                                                                 slot_node[0].operation == "merge":
7190                                                                                 continue
7191                                                                         good_uninstalls.append(node)
7192                                                                 with_some_uninstalls_excluded.append(node)
7193                                                         if good_uninstalls:
7194                                                                 nodes = good_uninstalls
7195                                                         elif with_some_uninstalls_excluded:
7196                                                                 nodes = with_some_uninstalls_excluded
7197                                                         else:
7198                                                                 nodes = nodes
7199
7200                                                 if ignore_priority is None and not tree_mode:
7201                                                         # Greedily pop all of these nodes since no
7202                                                         # relationship has been ignored. This optimization
7203                                                         # destroys --tree output, so it's disabled in tree
7204                                                         # mode.
7205                                                         selected_nodes = nodes
7206                                                 else:
7207                                                         # For optimal merge order:
7208                                                         #  * Only pop one node.
7209                                                         #  * Removing a root node (node without a parent)
7210                                                         #    will not produce a leaf node, so avoid it.
7211                                                         #  * It's normal for a selected uninstall to be a
7212                                                         #    root node, so don't check them for parents.
7213                                                         for node in nodes:
7214                                                                 if node.operation == "uninstall" or \
7215                                                                         mygraph.parent_nodes(node):
7216                                                                         selected_nodes = [node]
7217                                                                         break
7218
7219                                                 if selected_nodes:
7220                                                         break
7221
7222                         if not selected_nodes:
7223                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7224                                 if nodes:
7225                                         mergeable_nodes = set(nodes)
7226                                         if prefer_asap and asap_nodes:
7227                                                 nodes = asap_nodes
7228                                         for i in xrange(priority_range.SOFT,
7229                                                 priority_range.MEDIUM_SOFT + 1):
7230                                                 ignore_priority = priority_range.ignore_priority[i]
7231                                                 for node in nodes:
7232                                                         if not mygraph.parent_nodes(node):
7233                                                                 continue
7234                                                         selected_nodes = set()
7235                                                         if gather_deps(ignore_priority,
7236                                                                 mergeable_nodes, selected_nodes, node):
7237                                                                 break
7238                                                         else:
7239                                                                 selected_nodes = None
7240                                                 if selected_nodes:
7241                                                         break
7242
7243                                         if prefer_asap and asap_nodes and not selected_nodes:
7244                                                 # We failed to find any asap nodes to merge, so ignore
7245                                                 # them for the next iteration.
7246                                                 prefer_asap = False
7247                                                 continue
7248
7249                         if selected_nodes and ignore_priority is not None:
7250                                 # Try to merge ignored medium_soft deps as soon as possible
7251                                 # if they're not satisfied by installed packages.
7252                                 for node in selected_nodes:
7253                                         children = set(mygraph.child_nodes(node))
7254                                         soft = children.difference(
7255                                                 mygraph.child_nodes(node,
7256                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7257                                         medium_soft = children.difference(
7258                                                 mygraph.child_nodes(node,
7259                                                         ignore_priority = \
7260                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7261                                         medium_soft.difference_update(soft)
7262                                         for child in medium_soft:
7263                                                 if child in selected_nodes:
7264                                                         continue
7265                                                 if child in asap_nodes:
7266                                                         continue
7267                                                 asap_nodes.append(child)
7268
7269                         if selected_nodes and len(selected_nodes) > 1:
7270                                 if not isinstance(selected_nodes, list):
7271                                         selected_nodes = list(selected_nodes)
7272                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7273
7274                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7275                                 # An Uninstall task needs to be executed in order to
7276                                 # avoid conflict if possible.
7277
7278                                 if drop_satisfied:
7279                                         priority_range = DepPrioritySatisfiedRange
7280                                 else:
7281                                         priority_range = DepPriorityNormalRange
7282
7283                                 mergeable_nodes = get_nodes(
7284                                         ignore_priority=ignore_uninst_or_med)
7285
7286                                 min_parent_deps = None
7287                                 uninst_task = None
7288                                 for task in myblocker_uninstalls.leaf_nodes():
7289                                         # Do some sanity checks so that system or world packages
7290                                         # don't get uninstalled inappropriately here (only really
7291                                         # necessary when --complete-graph has not been enabled).
7292
7293                                         if task in ignored_uninstall_tasks:
7294                                                 continue
7295
7296                                         if task in scheduled_uninstalls:
7297                                                 # It's been scheduled but it hasn't
7298                                                 # been executed yet due to dependence
7299                                                 # on installation of blocking packages.
7300                                                 continue
7301
7302                                         root_config = self.roots[task.root]
7303                                         inst_pkg = self._pkg_cache[
7304                                                 ("installed", task.root, task.cpv, "nomerge")]
7305
7306                                         if self.digraph.contains(inst_pkg):
7307                                                 continue
7308
7309                                         forbid_overlap = False
7310                                         heuristic_overlap = False
7311                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7312                                                 if blocker.eapi in ("0", "1"):
7313                                                         heuristic_overlap = True
7314                                                 elif blocker.atom.blocker.overlap.forbid:
7315                                                         forbid_overlap = True
7316                                                         break
7317                                         if forbid_overlap and running_root == task.root:
7318                                                 continue
7319
7320                                         if heuristic_overlap and running_root == task.root:
7321                                                 # Never uninstall sys-apps/portage or it's essential
7322                                                 # dependencies, except through replacement.
7323                                                 try:
7324                                                         runtime_dep_atoms = \
7325                                                                 list(runtime_deps.iterAtomsForPackage(task))
7326                                                 except portage.exception.InvalidDependString, e:
7327                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7328                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7329                                                                 (task.root, task.cpv, e), noiselevel=-1)
7330                                                         del e
7331                                                         continue
7332
7333                                                 # Don't uninstall a runtime dep if it appears
7334                                                 # to be the only suitable one installed.
7335                                                 skip = False
7336                                                 vardb = root_config.trees["vartree"].dbapi
7337                                                 for atom in runtime_dep_atoms:
7338                                                         other_version = None
7339                                                         for pkg in vardb.match_pkgs(atom):
7340                                                                 if pkg.cpv == task.cpv and \
7341                                                                         pkg.metadata["COUNTER"] == \
7342                                                                         task.metadata["COUNTER"]:
7343                                                                         continue
7344                                                                 other_version = pkg
7345                                                                 break
7346                                                         if other_version is None:
7347                                                                 skip = True
7348                                                                 break
7349                                                 if skip:
7350                                                         continue
7351
7352                                                 # For packages in the system set, don't take
7353                                                 # any chances. If the conflict can't be resolved
7354                                                 # by a normal replacement operation then abort.
7355                                                 skip = False
7356                                                 try:
7357                                                         for atom in root_config.sets[
7358                                                                 "system"].iterAtomsForPackage(task):
7359                                                                 skip = True
7360                                                                 break
7361                                                 except portage.exception.InvalidDependString, e:
7362                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7363                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7364                                                                 (task.root, task.cpv, e), noiselevel=-1)
7365                                                         del e
7366                                                         skip = True
7367                                                 if skip:
7368                                                         continue
7369
7370                                         # Note that the world check isn't always
7371                                         # necessary since self._complete_graph() will
7372                                         # add all packages from the system and world sets to the
7373                                         # graph. This just allows unresolved conflicts to be
7374                                         # detected as early as possible, which makes it possible
7375                                         # to avoid calling self._complete_graph() when it is
7376                                         # unnecessary due to blockers triggering an abortion.
7377                                         if not complete:
7378                                                 # For packages in the world set, go ahead an uninstall
7379                                                 # when necessary, as long as the atom will be satisfied
7380                                                 # in the final state.
7381                                                 graph_db = self.mydbapi[task.root]
7382                                                 skip = False
7383                                                 try:
7384                                                         for atom in root_config.sets[
7385                                                                 "world"].iterAtomsForPackage(task):
7386                                                                 satisfied = False
7387                                                                 for pkg in graph_db.match_pkgs(atom):
7388                                                                         if pkg == inst_pkg:
7389                                                                                 continue
7390                                                                         satisfied = True
7391                                                                         break
7392                                                                 if not satisfied:
7393                                                                         skip = True
7394                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7395                                                                         break
7396                                                 except portage.exception.InvalidDependString, e:
7397                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7398                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7399                                                                 (task.root, task.cpv, e), noiselevel=-1)
7400                                                         del e
7401                                                         skip = True
7402                                                 if skip:
7403                                                         continue
7404
7405                                         # Check the deps of parent nodes to ensure that
7406                                         # the chosen task produces a leaf node. Maybe
7407                                         # this can be optimized some more to make the
7408                                         # best possible choice, but the current algorithm
7409                                         # is simple and should be near optimal for most
7410                                         # common cases.
7411                                         mergeable_parent = False
7412                                         parent_deps = set()
7413                                         for parent in mygraph.parent_nodes(task):
7414                                                 parent_deps.update(mygraph.child_nodes(parent,
7415                                                         ignore_priority=priority_range.ignore_medium_soft))
7416                                                 if parent in mergeable_nodes and \
7417                                                         gather_deps(ignore_uninst_or_med_soft,
7418                                                         mergeable_nodes, set(), parent):
7419                                                         mergeable_parent = True
7420
7421                                         if not mergeable_parent:
7422                                                 continue
7423
7424                                         parent_deps.remove(task)
7425                                         if min_parent_deps is None or \
7426                                                 len(parent_deps) < min_parent_deps:
7427                                                 min_parent_deps = len(parent_deps)
7428                                                 uninst_task = task
7429
7430                                 if uninst_task is not None:
7431                                         # The uninstall is performed only after blocking
7432                                         # packages have been merged on top of it. File
7433                                         # collisions between blocking packages are detected
7434                                         # and removed from the list of files to be uninstalled.
7435                                         scheduled_uninstalls.add(uninst_task)
7436                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7437
7438                                         # Reverse the parent -> uninstall edges since we want
7439                                         # to do the uninstall after blocking packages have
7440                                         # been merged on top of it.
7441                                         mygraph.remove(uninst_task)
7442                                         for blocked_pkg in parent_nodes:
7443                                                 mygraph.add(blocked_pkg, uninst_task,
7444                                                         priority=BlockerDepPriority.instance)
7445                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7446                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7447                                                         priority=BlockerDepPriority.instance)
7448
7449                                         # Reset the state variables for leaf node selection and
7450                                         # continue trying to select leaf nodes.
7451                                         prefer_asap = True
7452                                         drop_satisfied = False
7453                                         continue
7454
7455                         if not selected_nodes:
7456                                 # Only select root nodes as a last resort. This case should
7457                                 # only trigger when the graph is nearly empty and the only
7458                                 # remaining nodes are isolated (no parents or children). Since
7459                                 # the nodes must be isolated, ignore_priority is not needed.
7460                                 selected_nodes = get_nodes()
7461
7462                         if not selected_nodes and not drop_satisfied:
7463                                 drop_satisfied = True
7464                                 continue
7465
7466                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7467                                 # If possible, drop an uninstall task here in order to avoid
7468                                 # the circular deps code path. The corresponding blocker will
7469                                 # still be counted as an unresolved conflict.
7470                                 uninst_task = None
7471                                 for node in myblocker_uninstalls.leaf_nodes():
7472                                         try:
7473                                                 mygraph.remove(node)
7474                                         except KeyError:
7475                                                 pass
7476                                         else:
7477                                                 uninst_task = node
7478                                                 ignored_uninstall_tasks.add(node)
7479                                                 break
7480
7481                                 if uninst_task is not None:
7482                                         # Reset the state variables for leaf node selection and
7483                                         # continue trying to select leaf nodes.
7484                                         prefer_asap = True
7485                                         drop_satisfied = False
7486                                         continue
7487
7488                         if not selected_nodes:
7489                                 self._circular_deps_for_display = mygraph
7490                                 raise self._unknown_internal_error()
7491
7492                         # At this point, we've succeeded in selecting one or more nodes, so
7493                         # reset state variables for leaf node selection.
7494                         prefer_asap = True
7495                         drop_satisfied = False
7496
7497                         mygraph.difference_update(selected_nodes)
7498
7499                         for node in selected_nodes:
7500                                 if isinstance(node, Package) and \
7501                                         node.operation == "nomerge":
7502                                         continue
7503
7504                                 # Handle interactions between blockers
7505                                 # and uninstallation tasks.
7506                                 solved_blockers = set()
7507                                 uninst_task = None
7508                                 if isinstance(node, Package) and \
7509                                         "uninstall" == node.operation:
7510                                         have_uninstall_task = True
7511                                         uninst_task = node
7512                                 else:
7513                                         vardb = self.trees[node.root]["vartree"].dbapi
7514                                         previous_cpv = vardb.match(node.slot_atom)
7515                                         if previous_cpv:
7516                                                 # The package will be replaced by this one, so remove
7517                                                 # the corresponding Uninstall task if necessary.
7518                                                 previous_cpv = previous_cpv[0]
7519                                                 uninst_task = \
7520                                                         ("installed", node.root, previous_cpv, "uninstall")
7521                                                 try:
7522                                                         mygraph.remove(uninst_task)
7523                                                 except KeyError:
7524                                                         pass
7525
7526                                 if uninst_task is not None and \
7527                                         uninst_task not in ignored_uninstall_tasks and \
7528                                         myblocker_uninstalls.contains(uninst_task):
7529                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7530                                         myblocker_uninstalls.remove(uninst_task)
7531                                         # Discard any blockers that this Uninstall solves.
7532                                         for blocker in blocker_nodes:
7533                                                 if not myblocker_uninstalls.child_nodes(blocker):
7534                                                         myblocker_uninstalls.remove(blocker)
7535                                                         solved_blockers.add(blocker)
7536
7537                                 retlist.append(node)
7538
7539                                 if (isinstance(node, Package) and \
7540                                         "uninstall" == node.operation) or \
7541                                         (uninst_task is not None and \
7542                                         uninst_task in scheduled_uninstalls):
7543                                         # Include satisfied blockers in the merge list
7544                                         # since the user might be interested and also
7545                                         # it serves as an indicator that blocking packages
7546                                         # will be temporarily installed simultaneously.
7547                                         for blocker in solved_blockers:
7548                                                 retlist.append(Blocker(atom=blocker.atom,
7549                                                         root=blocker.root, eapi=blocker.eapi,
7550                                                         satisfied=True))
7551
7552                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7553                 for node in myblocker_uninstalls.root_nodes():
7554                         unsolvable_blockers.add(node)
7555
7556                 for blocker in unsolvable_blockers:
7557                         retlist.append(blocker)
7558
7559                 # If any Uninstall tasks need to be executed in order
7560                 # to avoid a conflict, complete the graph with any
7561                 # dependencies that may have been initially
7562                 # neglected (to ensure that unsafe Uninstall tasks
7563                 # are properly identified and blocked from execution).
7564                 if have_uninstall_task and \
7565                         not complete and \
7566                         not unsolvable_blockers:
7567                         self.myparams.add("complete")
7568                         raise self._serialize_tasks_retry("")
7569
7570                 if unsolvable_blockers and \
7571                         not self._accept_blocker_conflicts():
7572                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7573                         self._serialized_tasks_cache = retlist[:]
7574                         self._scheduler_graph = scheduler_graph
7575                         raise self._unknown_internal_error()
7576
7577                 if self._slot_collision_info and \
7578                         not self._accept_blocker_conflicts():
7579                         self._serialized_tasks_cache = retlist[:]
7580                         self._scheduler_graph = scheduler_graph
7581                         raise self._unknown_internal_error()
7582
7583                 return retlist, scheduler_graph
7584
7585         def _show_circular_deps(self, mygraph):
7586                 # No leaf nodes are available, so we have a circular
7587                 # dependency panic situation.  Reduce the noise level to a
7588                 # minimum via repeated elimination of root nodes since they
7589                 # have no parents and thus can not be part of a cycle.
7590                 while True:
7591                         root_nodes = mygraph.root_nodes(
7592                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7593                         if not root_nodes:
7594                                 break
7595                         mygraph.difference_update(root_nodes)
7596                 # Display the USE flags that are enabled on nodes that are part
7597                 # of dependency cycles in case that helps the user decide to
7598                 # disable some of them.
7599                 display_order = []
7600                 tempgraph = mygraph.copy()
7601                 while not tempgraph.empty():
7602                         nodes = tempgraph.leaf_nodes()
7603                         if not nodes:
7604                                 node = tempgraph.order[0]
7605                         else:
7606                                 node = nodes[0]
7607                         display_order.append(node)
7608                         tempgraph.remove(node)
7609                 display_order.reverse()
7610                 self.myopts.pop("--quiet", None)
7611                 self.myopts.pop("--verbose", None)
7612                 self.myopts["--tree"] = True
7613                 portage.writemsg("\n\n", noiselevel=-1)
7614                 self.display(display_order)
7615                 prefix = colorize("BAD", " * ")
7616                 portage.writemsg("\n", noiselevel=-1)
7617                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7618                         noiselevel=-1)
7619                 portage.writemsg("\n", noiselevel=-1)
7620                 mygraph.debug_print()
7621                 portage.writemsg("\n", noiselevel=-1)
7622                 portage.writemsg(prefix + "Note that circular dependencies " + \
7623                         "can often be avoided by temporarily\n", noiselevel=-1)
7624                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7625                         "optional dependencies.\n", noiselevel=-1)
7626
7627         def _show_merge_list(self):
7628                 if self._serialized_tasks_cache is not None and \
7629                         not (self._displayed_list and \
7630                         (self._displayed_list == self._serialized_tasks_cache or \
7631                         self._displayed_list == \
7632                                 list(reversed(self._serialized_tasks_cache)))):
7633                         display_list = self._serialized_tasks_cache[:]
7634                         if "--tree" in self.myopts:
7635                                 display_list.reverse()
7636                         self.display(display_list)
7637
7638         def _show_unsatisfied_blockers(self, blockers):
7639                 self._show_merge_list()
7640                 msg = "Error: The above package list contains " + \
7641                         "packages which cannot be installed " + \
7642                         "at the same time on the same system."
7643                 prefix = colorize("BAD", " * ")
7644                 from textwrap import wrap
7645                 portage.writemsg("\n", noiselevel=-1)
7646                 for line in wrap(msg, 70):
7647                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7648
7649                 # Display the conflicting packages along with the packages
7650                 # that pulled them in. This is helpful for troubleshooting
7651                 # cases in which blockers don't solve automatically and
7652                 # the reasons are not apparent from the normal merge list
7653                 # display.
7654
7655                 conflict_pkgs = {}
7656                 for blocker in blockers:
7657                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7658                                 self._blocker_parents.parent_nodes(blocker)):
7659                                 parent_atoms = self._parent_atoms.get(pkg)
7660                                 if not parent_atoms:
7661                                         atom = self._blocked_world_pkgs.get(pkg)
7662                                         if atom is not None:
7663                                                 parent_atoms = set([("@world", atom)])
7664                                 if parent_atoms:
7665                                         conflict_pkgs[pkg] = parent_atoms
7666
7667                 if conflict_pkgs:
7668                         # Reduce noise by pruning packages that are only
7669                         # pulled in by other conflict packages.
7670                         pruned_pkgs = set()
7671                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7672                                 relevant_parent = False
7673                                 for parent, atom in parent_atoms:
7674                                         if parent not in conflict_pkgs:
7675                                                 relevant_parent = True
7676                                                 break
7677                                 if not relevant_parent:
7678                                         pruned_pkgs.add(pkg)
7679                         for pkg in pruned_pkgs:
7680                                 del conflict_pkgs[pkg]
7681
7682                 if conflict_pkgs:
7683                         msg = []
7684                         msg.append("\n")
7685                         indent = "  "
7686                         # Max number of parents shown, to avoid flooding the display.
7687                         max_parents = 3
7688                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7689
7690                                 pruned_list = set()
7691
7692                                 # Prefer packages that are not directly involved in a conflict.
7693                                 for parent_atom in parent_atoms:
7694                                         if len(pruned_list) >= max_parents:
7695                                                 break
7696                                         parent, atom = parent_atom
7697                                         if parent not in conflict_pkgs:
7698                                                 pruned_list.add(parent_atom)
7699
7700                                 for parent_atom in parent_atoms:
7701                                         if len(pruned_list) >= max_parents:
7702                                                 break
7703                                         pruned_list.add(parent_atom)
7704
7705                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7706                                 msg.append(indent + "%s pulled in by\n" % pkg)
7707
7708                                 for parent_atom in pruned_list:
7709                                         parent, atom = parent_atom
7710                                         msg.append(2*indent)
7711                                         if isinstance(parent,
7712                                                 (PackageArg, AtomArg)):
7713                                                 # For PackageArg and AtomArg types, it's
7714                                                 # redundant to display the atom attribute.
7715                                                 msg.append(str(parent))
7716                                         else:
7717                                                 # Display the specific atom from SetArg or
7718                                                 # Package types.
7719                                                 msg.append("%s required by %s" % (atom, parent))
7720                                         msg.append("\n")
7721
7722                                 if omitted_parents:
7723                                         msg.append(2*indent)
7724                                         msg.append("(and %d more)\n" % omitted_parents)
7725
7726                                 msg.append("\n")
7727
7728                         sys.stderr.write("".join(msg))
7729                         sys.stderr.flush()
7730
7731                 if "--quiet" not in self.myopts:
7732                         show_blocker_docs_link()
7733
7734         def display(self, mylist, favorites=[], verbosity=None):
7735
7736                 # This is used to prevent display_problems() from
7737                 # redundantly displaying this exact same merge list
7738                 # again via _show_merge_list().
7739                 self._displayed_list = mylist
7740
7741                 if verbosity is None:
7742                         verbosity = ("--quiet" in self.myopts and 1 or \
7743                                 "--verbose" in self.myopts and 3 or 2)
7744                 favorites_set = InternalPackageSet(favorites)
7745                 oneshot = "--oneshot" in self.myopts or \
7746                         "--onlydeps" in self.myopts
7747                 columns = "--columns" in self.myopts
7748                 changelogs=[]
7749                 p=[]
7750                 blockers = []
7751
7752                 counters = PackageCounters()
7753
7754                 if verbosity == 1 and "--verbose" not in self.myopts:
7755                         def create_use_string(*args):
7756                                 return ""
7757                 else:
7758                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7759                                 old_iuse, old_use,
7760                                 is_new, reinst_flags,
7761                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7762                                 alphabetical=("--alphabetical" in self.myopts)):
7763                                 enabled = []
7764                                 if alphabetical:
7765                                         disabled = enabled
7766                                         removed = enabled
7767                                 else:
7768                                         disabled = []
7769                                         removed = []
7770                                 cur_iuse = set(cur_iuse)
7771                                 enabled_flags = cur_iuse.intersection(cur_use)
7772                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7773                                 any_iuse = cur_iuse.union(old_iuse)
7774                                 any_iuse = list(any_iuse)
7775                                 any_iuse.sort()
7776                                 for flag in any_iuse:
7777                                         flag_str = None
7778                                         isEnabled = False
7779                                         reinst_flag = reinst_flags and flag in reinst_flags
7780                                         if flag in enabled_flags:
7781                                                 isEnabled = True
7782                                                 if is_new or flag in old_use and \
7783                                                         (all_flags or reinst_flag):
7784                                                         flag_str = red(flag)
7785                                                 elif flag not in old_iuse:
7786                                                         flag_str = yellow(flag) + "%*"
7787                                                 elif flag not in old_use:
7788                                                         flag_str = green(flag) + "*"
7789                                         elif flag in removed_iuse:
7790                                                 if all_flags or reinst_flag:
7791                                                         flag_str = yellow("-" + flag) + "%"
7792                                                         if flag in old_use:
7793                                                                 flag_str += "*"
7794                                                         flag_str = "(" + flag_str + ")"
7795                                                         removed.append(flag_str)
7796                                                 continue
7797                                         else:
7798                                                 if is_new or flag in old_iuse and \
7799                                                         flag not in old_use and \
7800                                                         (all_flags or reinst_flag):
7801                                                         flag_str = blue("-" + flag)
7802                                                 elif flag not in old_iuse:
7803                                                         flag_str = yellow("-" + flag)
7804                                                         if flag not in iuse_forced:
7805                                                                 flag_str += "%"
7806                                                 elif flag in old_use:
7807                                                         flag_str = green("-" + flag) + "*"
7808                                         if flag_str:
7809                                                 if flag in iuse_forced:
7810                                                         flag_str = "(" + flag_str + ")"
7811                                                 if isEnabled:
7812                                                         enabled.append(flag_str)
7813                                                 else:
7814                                                         disabled.append(flag_str)
7815
7816                                 if alphabetical:
7817                                         ret = " ".join(enabled)
7818                                 else:
7819                                         ret = " ".join(enabled + disabled + removed)
7820                                 if ret:
7821                                         ret = '%s="%s" ' % (name, ret)
7822                                 return ret
7823
7824                 repo_display = RepoDisplay(self.roots)
7825
7826                 tree_nodes = []
7827                 display_list = []
7828                 mygraph = self.digraph.copy()
7829
7830                 # If there are any Uninstall instances, add the corresponding
7831                 # blockers to the digraph (useful for --tree display).
7832
7833                 executed_uninstalls = set(node for node in mylist \
7834                         if isinstance(node, Package) and node.operation == "unmerge")
7835
7836                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7837                         uninstall_parents = \
7838                                 self._blocker_uninstalls.parent_nodes(uninstall)
7839                         if not uninstall_parents:
7840                                 continue
7841
7842                         # Remove the corresponding "nomerge" node and substitute
7843                         # the Uninstall node.
7844                         inst_pkg = self._pkg_cache[
7845                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7846                         try:
7847                                 mygraph.remove(inst_pkg)
7848                         except KeyError:
7849                                 pass
7850
7851                         try:
7852                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7853                         except KeyError:
7854                                 inst_pkg_blockers = []
7855
7856                         # Break the Package -> Uninstall edges.
7857                         mygraph.remove(uninstall)
7858
7859                         # Resolution of a package's blockers
7860                         # depend on it's own uninstallation.
7861                         for blocker in inst_pkg_blockers:
7862                                 mygraph.add(uninstall, blocker)
7863
7864                         # Expand Package -> Uninstall edges into
7865                         # Package -> Blocker -> Uninstall edges.
7866                         for blocker in uninstall_parents:
7867                                 mygraph.add(uninstall, blocker)
7868                                 for parent in self._blocker_parents.parent_nodes(blocker):
7869                                         if parent != inst_pkg:
7870                                                 mygraph.add(blocker, parent)
7871
7872                         # If the uninstall task did not need to be executed because
7873                         # of an upgrade, display Blocker -> Upgrade edges since the
7874                         # corresponding Blocker -> Uninstall edges will not be shown.
7875                         upgrade_node = \
7876                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7877                         if upgrade_node is not None and \
7878                                 uninstall not in executed_uninstalls:
7879                                 for blocker in uninstall_parents:
7880                                         mygraph.add(upgrade_node, blocker)
7881
7882                 unsatisfied_blockers = []
7883                 i = 0
7884                 depth = 0
7885                 shown_edges = set()
7886                 for x in mylist:
7887                         if isinstance(x, Blocker) and not x.satisfied:
7888                                 unsatisfied_blockers.append(x)
7889                                 continue
7890                         graph_key = x
7891                         if "--tree" in self.myopts:
7892                                 depth = len(tree_nodes)
7893                                 while depth and graph_key not in \
7894                                         mygraph.child_nodes(tree_nodes[depth-1]):
7895                                                 depth -= 1
7896                                 if depth:
7897                                         tree_nodes = tree_nodes[:depth]
7898                                         tree_nodes.append(graph_key)
7899                                         display_list.append((x, depth, True))
7900                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7901                                 else:
7902                                         traversed_nodes = set() # prevent endless circles
7903                                         traversed_nodes.add(graph_key)
7904                                         def add_parents(current_node, ordered):
7905                                                 parent_nodes = None
7906                                                 # Do not traverse to parents if this node is an
7907                                                 # an argument or a direct member of a set that has
7908                                                 # been specified as an argument (system or world).
7909                                                 if current_node not in self._set_nodes:
7910                                                         parent_nodes = mygraph.parent_nodes(current_node)
7911                                                 if parent_nodes:
7912                                                         child_nodes = set(mygraph.child_nodes(current_node))
7913                                                         selected_parent = None
7914                                                         # First, try to avoid a direct cycle.
7915                                                         for node in parent_nodes:
7916                                                                 if not isinstance(node, (Blocker, Package)):
7917                                                                         continue
7918                                                                 if node not in traversed_nodes and \
7919                                                                         node not in child_nodes:
7920                                                                         edge = (current_node, node)
7921                                                                         if edge in shown_edges:
7922                                                                                 continue
7923                                                                         selected_parent = node
7924                                                                         break
7925                                                         if not selected_parent:
7926                                                                 # A direct cycle is unavoidable.
7927                                                                 for node in parent_nodes:
7928                                                                         if not isinstance(node, (Blocker, Package)):
7929                                                                                 continue
7930                                                                         if node not in traversed_nodes:
7931                                                                                 edge = (current_node, node)
7932                                                                                 if edge in shown_edges:
7933                                                                                         continue
7934                                                                                 selected_parent = node
7935                                                                                 break
7936                                                         if selected_parent:
7937                                                                 shown_edges.add((current_node, selected_parent))
7938                                                                 traversed_nodes.add(selected_parent)
7939                                                                 add_parents(selected_parent, False)
7940                                                 display_list.append((current_node,
7941                                                         len(tree_nodes), ordered))
7942                                                 tree_nodes.append(current_node)
7943                                         tree_nodes = []
7944                                         add_parents(graph_key, True)
7945                         else:
7946                                 display_list.append((x, depth, True))
7947                 mylist = display_list
7948                 for x in unsatisfied_blockers:
7949                         mylist.append((x, 0, True))
7950
7951                 last_merge_depth = 0
7952                 for i in xrange(len(mylist)-1,-1,-1):
7953                         graph_key, depth, ordered = mylist[i]
7954                         if not ordered and depth == 0 and i > 0 \
7955                                 and graph_key == mylist[i-1][0] and \
7956                                 mylist[i-1][1] == 0:
7957                                 # An ordered node got a consecutive duplicate when the tree was
7958                                 # being filled in.
7959                                 del mylist[i]
7960                                 continue
7961                         if ordered and graph_key[-1] != "nomerge":
7962                                 last_merge_depth = depth
7963                                 continue
7964                         if depth >= last_merge_depth or \
7965                                 i < len(mylist) - 1 and \
7966                                 depth >= mylist[i+1][1]:
7967                                         del mylist[i]
7968
7969                 from portage import flatten
7970                 from portage.dep import use_reduce, paren_reduce
7971                 # files to fetch list - avoids counting a same file twice
7972                 # in size display (verbose mode)
7973                 myfetchlist=[]
7974
7975                 # Use this set to detect when all the "repoadd" strings are "[0]"
7976                 # and disable the entire repo display in this case.
7977                 repoadd_set = set()
7978
7979                 for mylist_index in xrange(len(mylist)):
7980                         x, depth, ordered = mylist[mylist_index]
7981                         pkg_type = x[0]
7982                         myroot = x[1]
7983                         pkg_key = x[2]
7984                         portdb = self.trees[myroot]["porttree"].dbapi
7985                         bindb  = self.trees[myroot]["bintree"].dbapi
7986                         vardb = self.trees[myroot]["vartree"].dbapi
7987                         vartree = self.trees[myroot]["vartree"]
7988                         pkgsettings = self.pkgsettings[myroot]
7989
7990                         fetch=" "
7991                         indent = " " * depth
7992
7993                         if isinstance(x, Blocker):
7994                                 if x.satisfied:
7995                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7996                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7997                                 else:
7998                                         blocker_style = "PKG_BLOCKER"
7999                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8000                                 if ordered:
8001                                         counters.blocks += 1
8002                                         if x.satisfied:
8003                                                 counters.blocks_satisfied += 1
8004                                 resolved = portage.key_expand(
8005                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8006                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8007                                         addl += " " + colorize(blocker_style, resolved)
8008                                 else:
8009                                         addl = "[%s %s] %s%s" % \
8010                                                 (colorize(blocker_style, "blocks"),
8011                                                 addl, indent, colorize(blocker_style, resolved))
8012                                 block_parents = self._blocker_parents.parent_nodes(x)
8013                                 block_parents = set([pnode[2] for pnode in block_parents])
8014                                 block_parents = ", ".join(block_parents)
8015                                 if resolved!=x[2]:
8016                                         addl += colorize(blocker_style,
8017                                                 " (\"%s\" is blocking %s)") % \
8018                                                 (str(x.atom).lstrip("!"), block_parents)
8019                                 else:
8020                                         addl += colorize(blocker_style,
8021                                                 " (is blocking %s)") % block_parents
8022                                 if isinstance(x, Blocker) and x.satisfied:
8023                                         if columns:
8024                                                 continue
8025                                         p.append(addl)
8026                                 else:
8027                                         blockers.append(addl)
8028                         else:
8029                                 pkg_status = x[3]
8030                                 pkg_merge = ordered and pkg_status == "merge"
8031                                 if not pkg_merge and pkg_status == "merge":
8032                                         pkg_status = "nomerge"
8033                                 built = pkg_type != "ebuild"
8034                                 installed = pkg_type == "installed"
8035                                 pkg = x
8036                                 metadata = pkg.metadata
8037                                 ebuild_path = None
8038                                 repo_name = metadata["repository"]
8039                                 if pkg_type == "ebuild":
8040                                         ebuild_path = portdb.findname(pkg_key)
8041                                         if not ebuild_path: # shouldn't happen
8042                                                 raise portage.exception.PackageNotFound(pkg_key)
8043                                         repo_path_real = os.path.dirname(os.path.dirname(
8044                                                 os.path.dirname(ebuild_path)))
8045                                 else:
8046                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8047                                 pkg_use = list(pkg.use.enabled)
8048                                 try:
8049                                         restrict = flatten(use_reduce(paren_reduce(
8050                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8051                                 except portage.exception.InvalidDependString, e:
8052                                         if not pkg.installed:
8053                                                 show_invalid_depstring_notice(x,
8054                                                         pkg.metadata["RESTRICT"], str(e))
8055                                                 del e
8056                                                 return 1
8057                                         restrict = []
8058                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8059                                         "fetch" in restrict:
8060                                         fetch = red("F")
8061                                         if ordered:
8062                                                 counters.restrict_fetch += 1
8063                                         if portdb.fetch_check(pkg_key, pkg_use):
8064                                                 fetch = green("f")
8065                                                 if ordered:
8066                                                         counters.restrict_fetch_satisfied += 1
8067
8068                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8069                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8070                                 myoldbest = []
8071                                 myinslotlist = None
8072                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8073                                 if vardb.cpv_exists(pkg_key):
8074                                         addl="  "+yellow("R")+fetch+"  "
8075                                         if ordered:
8076                                                 if pkg_merge:
8077                                                         counters.reinst += 1
8078                                                 elif pkg_status == "uninstall":
8079                                                         counters.uninst += 1
8080                                 # filter out old-style virtual matches
8081                                 elif installed_versions and \
8082                                         portage.cpv_getkey(installed_versions[0]) == \
8083                                         portage.cpv_getkey(pkg_key):
8084                                         myinslotlist = vardb.match(pkg.slot_atom)
8085                                         # If this is the first install of a new-style virtual, we
8086                                         # need to filter out old-style virtual matches.
8087                                         if myinslotlist and \
8088                                                 portage.cpv_getkey(myinslotlist[0]) != \
8089                                                 portage.cpv_getkey(pkg_key):
8090                                                 myinslotlist = None
8091                                         if myinslotlist:
8092                                                 myoldbest = myinslotlist[:]
8093                                                 addl = "   " + fetch
8094                                                 if not portage.dep.cpvequal(pkg_key,
8095                                                         portage.best([pkg_key] + myoldbest)):
8096                                                         # Downgrade in slot
8097                                                         addl += turquoise("U")+blue("D")
8098                                                         if ordered:
8099                                                                 counters.downgrades += 1
8100                                                 else:
8101                                                         # Update in slot
8102                                                         addl += turquoise("U") + " "
8103                                                         if ordered:
8104                                                                 counters.upgrades += 1
8105                                         else:
8106                                                 # New slot, mark it new.
8107                                                 addl = " " + green("NS") + fetch + "  "
8108                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8109                                                 if ordered:
8110                                                         counters.newslot += 1
8111
8112                                         if "--changelog" in self.myopts:
8113                                                 inst_matches = vardb.match(pkg.slot_atom)
8114                                                 if inst_matches:
8115                                                         changelogs.extend(self.calc_changelog(
8116                                                                 portdb.findname(pkg_key),
8117                                                                 inst_matches[0], pkg_key))
8118                                 else:
8119                                         addl = " " + green("N") + " " + fetch + "  "
8120                                         if ordered:
8121                                                 counters.new += 1
8122
8123                                 verboseadd = ""
8124                                 repoadd = None
8125
8126                                 if True:
8127                                         # USE flag display
8128                                         forced_flags = set()
8129                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8130                                         forced_flags.update(pkgsettings.useforce)
8131                                         forced_flags.update(pkgsettings.usemask)
8132
8133                                         cur_use = [flag for flag in pkg.use.enabled \
8134                                                 if flag in pkg.iuse.all]
8135                                         cur_iuse = sorted(pkg.iuse.all)
8136
8137                                         if myoldbest and myinslotlist:
8138                                                 previous_cpv = myoldbest[0]
8139                                         else:
8140                                                 previous_cpv = pkg.cpv
8141                                         if vardb.cpv_exists(previous_cpv):
8142                                                 old_iuse, old_use = vardb.aux_get(
8143                                                                 previous_cpv, ["IUSE", "USE"])
8144                                                 old_iuse = list(set(
8145                                                         filter_iuse_defaults(old_iuse.split())))
8146                                                 old_iuse.sort()
8147                                                 old_use = old_use.split()
8148                                                 is_new = False
8149                                         else:
8150                                                 old_iuse = []
8151                                                 old_use = []
8152                                                 is_new = True
8153
8154                                         old_use = [flag for flag in old_use if flag in old_iuse]
8155
8156                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8157                                         use_expand.sort()
8158                                         use_expand.reverse()
8159                                         use_expand_hidden = \
8160                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8161
8162                                         def map_to_use_expand(myvals, forcedFlags=False,
8163                                                 removeHidden=True):
8164                                                 ret = {}
8165                                                 forced = {}
8166                                                 for exp in use_expand:
8167                                                         ret[exp] = []
8168                                                         forced[exp] = set()
8169                                                         for val in myvals[:]:
8170                                                                 if val.startswith(exp.lower()+"_"):
8171                                                                         if val in forced_flags:
8172                                                                                 forced[exp].add(val[len(exp)+1:])
8173                                                                         ret[exp].append(val[len(exp)+1:])
8174                                                                         myvals.remove(val)
8175                                                 ret["USE"] = myvals
8176                                                 forced["USE"] = [val for val in myvals \
8177                                                         if val in forced_flags]
8178                                                 if removeHidden:
8179                                                         for exp in use_expand_hidden:
8180                                                                 ret.pop(exp, None)
8181                                                 if forcedFlags:
8182                                                         return ret, forced
8183                                                 return ret
8184
8185                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8186                                         # are the only thing that triggered reinstallation.
8187                                         reinst_flags_map = {}
8188                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8189                                         reinst_expand_map = None
8190                                         if reinstall_for_flags:
8191                                                 reinst_flags_map = map_to_use_expand(
8192                                                         list(reinstall_for_flags), removeHidden=False)
8193                                                 for k in list(reinst_flags_map):
8194                                                         if not reinst_flags_map[k]:
8195                                                                 del reinst_flags_map[k]
8196                                                 if not reinst_flags_map.get("USE"):
8197                                                         reinst_expand_map = reinst_flags_map.copy()
8198                                                         reinst_expand_map.pop("USE", None)
8199                                         if reinst_expand_map and \
8200                                                 not set(reinst_expand_map).difference(
8201                                                 use_expand_hidden):
8202                                                 use_expand_hidden = \
8203                                                         set(use_expand_hidden).difference(
8204                                                         reinst_expand_map)
8205
8206                                         cur_iuse_map, iuse_forced = \
8207                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8208                                         cur_use_map = map_to_use_expand(cur_use)
8209                                         old_iuse_map = map_to_use_expand(old_iuse)
8210                                         old_use_map = map_to_use_expand(old_use)
8211
8212                                         use_expand.sort()
8213                                         use_expand.insert(0, "USE")
8214                                         
8215                                         for key in use_expand:
8216                                                 if key in use_expand_hidden:
8217                                                         continue
8218                                                 verboseadd += create_use_string(key.upper(),
8219                                                         cur_iuse_map[key], iuse_forced[key],
8220                                                         cur_use_map[key], old_iuse_map[key],
8221                                                         old_use_map[key], is_new,
8222                                                         reinst_flags_map.get(key))
8223
8224                                 if verbosity == 3:
8225                                         # size verbose
8226                                         mysize=0
8227                                         if pkg_type == "ebuild" and pkg_merge:
8228                                                 try:
8229                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8230                                                                 useflags=pkg_use, debug=self.edebug)
8231                                                 except portage.exception.InvalidDependString, e:
8232                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8233                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8234                                                         del e
8235                                                         return 1
8236                                                 if myfilesdict is None:
8237                                                         myfilesdict="[empty/missing/bad digest]"
8238                                                 else:
8239                                                         for myfetchfile in myfilesdict:
8240                                                                 if myfetchfile not in myfetchlist:
8241                                                                         mysize+=myfilesdict[myfetchfile]
8242                                                                         myfetchlist.append(myfetchfile)
8243                                                         if ordered:
8244                                                                 counters.totalsize += mysize
8245                                                 verboseadd += format_size(mysize)
8246
8247                                         # overlay verbose
8248                                         # assign index for a previous version in the same slot
8249                                         has_previous = False
8250                                         repo_name_prev = None
8251                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8252                                                 metadata["SLOT"])
8253                                         slot_matches = vardb.match(slot_atom)
8254                                         if slot_matches:
8255                                                 has_previous = True
8256                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8257                                                         ["repository"])[0]
8258
8259                                         # now use the data to generate output
8260                                         if pkg.installed or not has_previous:
8261                                                 repoadd = repo_display.repoStr(repo_path_real)
8262                                         else:
8263                                                 repo_path_prev = None
8264                                                 if repo_name_prev:
8265                                                         repo_path_prev = portdb.getRepositoryPath(
8266                                                                 repo_name_prev)
8267                                                 if repo_path_prev == repo_path_real:
8268                                                         repoadd = repo_display.repoStr(repo_path_real)
8269                                                 else:
8270                                                         repoadd = "%s=>%s" % (
8271                                                                 repo_display.repoStr(repo_path_prev),
8272                                                                 repo_display.repoStr(repo_path_real))
8273                                         if repoadd:
8274                                                 repoadd_set.add(repoadd)
8275
8276                                 xs = [portage.cpv_getkey(pkg_key)] + \
8277                                         list(portage.catpkgsplit(pkg_key)[2:])
8278                                 if xs[2] == "r0":
8279                                         xs[2] = ""
8280                                 else:
8281                                         xs[2] = "-" + xs[2]
8282
8283                                 mywidth = 130
8284                                 if "COLUMNWIDTH" in self.settings:
8285                                         try:
8286                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8287                                         except ValueError, e:
8288                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8289                                                 portage.writemsg(
8290                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8291                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8292                                                 del e
8293                                 oldlp = mywidth - 30
8294                                 newlp = oldlp - 30
8295
8296                                 # Convert myoldbest from a list to a string.
8297                                 if not myoldbest:
8298                                         myoldbest = ""
8299                                 else:
8300                                         for pos, key in enumerate(myoldbest):
8301                                                 key = portage.catpkgsplit(key)[2] + \
8302                                                         "-" + portage.catpkgsplit(key)[3]
8303                                                 if key[-3:] == "-r0":
8304                                                         key = key[:-3]
8305                                                 myoldbest[pos] = key
8306                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8307
8308                                 pkg_cp = xs[0]
8309                                 root_config = self.roots[myroot]
8310                                 system_set = root_config.sets["system"]
8311                                 world_set  = root_config.sets["world"]
8312
8313                                 pkg_system = False
8314                                 pkg_world = False
8315                                 try:
8316                                         pkg_system = system_set.findAtomForPackage(pkg)
8317                                         pkg_world  = world_set.findAtomForPackage(pkg)
8318                                         if not (oneshot or pkg_world) and \
8319                                                 myroot == self.target_root and \
8320                                                 favorites_set.findAtomForPackage(pkg):
8321                                                 # Maybe it will be added to world now.
8322                                                 if create_world_atom(pkg, favorites_set, root_config):
8323                                                         pkg_world = True
8324                                 except portage.exception.InvalidDependString:
8325                                         # This is reported elsewhere if relevant.
8326                                         pass
8327
8328                                 def pkgprint(pkg_str):
8329                                         if pkg_merge:
8330                                                 if pkg_system:
8331                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8332                                                 elif pkg_world:
8333                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8334                                                 else:
8335                                                         return colorize("PKG_MERGE", pkg_str)
8336                                         elif pkg_status == "uninstall":
8337                                                 return colorize("PKG_UNINSTALL", pkg_str)
8338                                         else:
8339                                                 if pkg_system:
8340                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8341                                                 elif pkg_world:
8342                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8343                                                 else:
8344                                                         return colorize("PKG_NOMERGE", pkg_str)
8345
8346                                 try:
8347                                         properties = flatten(use_reduce(paren_reduce(
8348                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8349                                 except portage.exception.InvalidDependString, e:
8350                                         if not pkg.installed:
8351                                                 show_invalid_depstring_notice(pkg,
8352                                                         pkg.metadata["PROPERTIES"], str(e))
8353                                                 del e
8354                                                 return 1
8355                                         properties = []
8356                                 interactive = "interactive" in properties
8357                                 if interactive and pkg.operation == "merge":
8358                                         addl = colorize("WARN", "I") + addl[1:]
8359                                         if ordered:
8360                                                 counters.interactive += 1
8361
8362                                 if x[1]!="/":
8363                                         if myoldbest:
8364                                                 myoldbest +=" "
8365                                         if "--columns" in self.myopts:
8366                                                 if "--quiet" in self.myopts:
8367                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8368                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8369                                                         myprint=myprint+myoldbest
8370                                                         myprint=myprint+darkgreen("to "+x[1])
8371                                                         verboseadd = None
8372                                                 else:
8373                                                         if not pkg_merge:
8374                                                                 myprint = "[%s] %s%s" % \
8375                                                                         (pkgprint(pkg_status.ljust(13)),
8376                                                                         indent, pkgprint(pkg.cp))
8377                                                         else:
8378                                                                 myprint = "[%s %s] %s%s" % \
8379                                                                         (pkgprint(pkg.type_name), addl,
8380                                                                         indent, pkgprint(pkg.cp))
8381                                                         if (newlp-nc_len(myprint)) > 0:
8382                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8383                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8384                                                         if (oldlp-nc_len(myprint)) > 0:
8385                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8386                                                         myprint=myprint+myoldbest
8387                                                         myprint += darkgreen("to " + pkg.root)
8388                                         else:
8389                                                 if not pkg_merge:
8390                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8391                                                 else:
8392                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8393                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8394                                                         myoldbest + darkgreen("to " + myroot)
8395                                 else:
8396                                         if "--columns" in self.myopts:
8397                                                 if "--quiet" in self.myopts:
8398                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8399                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8400                                                         myprint=myprint+myoldbest
8401                                                         verboseadd = None
8402                                                 else:
8403                                                         if not pkg_merge:
8404                                                                 myprint = "[%s] %s%s" % \
8405                                                                         (pkgprint(pkg_status.ljust(13)),
8406                                                                         indent, pkgprint(pkg.cp))
8407                                                         else:
8408                                                                 myprint = "[%s %s] %s%s" % \
8409                                                                         (pkgprint(pkg.type_name), addl,
8410                                                                         indent, pkgprint(pkg.cp))
8411                                                         if (newlp-nc_len(myprint)) > 0:
8412                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8413                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8414                                                         if (oldlp-nc_len(myprint)) > 0:
8415                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8416                                                         myprint += myoldbest
8417                                         else:
8418                                                 if not pkg_merge:
8419                                                         myprint = "[%s] %s%s %s" % \
8420                                                                 (pkgprint(pkg_status.ljust(13)),
8421                                                                 indent, pkgprint(pkg.cpv),
8422                                                                 myoldbest)
8423                                                 else:
8424                                                         myprint = "[%s %s] %s%s %s" % \
8425                                                                 (pkgprint(pkg_type), addl, indent,
8426                                                                 pkgprint(pkg.cpv), myoldbest)
8427
8428                                 if columns and pkg.operation == "uninstall":
8429                                         continue
8430                                 p.append((myprint, verboseadd, repoadd))
8431
8432                                 if "--tree" not in self.myopts and \
8433                                         "--quiet" not in self.myopts and \
8434                                         not self._opts_no_restart.intersection(self.myopts) and \
8435                                         pkg.root == self._running_root.root and \
8436                                         portage.match_from_list(
8437                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8438                                         not vardb.cpv_exists(pkg.cpv) and \
8439                                         "--quiet" not in self.myopts:
8440                                                 if mylist_index < len(mylist) - 1:
8441                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8442                                                         p.append(colorize("WARN", "    then resume the merge."))
8443
8444                 out = sys.stdout
8445                 show_repos = repoadd_set and repoadd_set != set(["0"])
8446
8447                 for x in p:
8448                         if isinstance(x, basestring):
8449                                 out.write("%s\n" % (x,))
8450                                 continue
8451
8452                         myprint, verboseadd, repoadd = x
8453
8454                         if verboseadd:
8455                                 myprint += " " + verboseadd
8456
8457                         if show_repos and repoadd:
8458                                 myprint += " " + teal("[%s]" % repoadd)
8459
8460                         out.write("%s\n" % (myprint,))
8461
8462                 for x in blockers:
8463                         print x
8464
8465                 if verbosity == 3:
8466                         print
8467                         print counters
8468                         if show_repos:
8469                                 sys.stdout.write(str(repo_display))
8470
8471                 if "--changelog" in self.myopts:
8472                         print
8473                         for revision,text in changelogs:
8474                                 print bold('*'+revision)
8475                                 sys.stdout.write(text)
8476
8477                 sys.stdout.flush()
8478                 return os.EX_OK
8479
8480         def display_problems(self):
8481                 """
8482                 Display problems with the dependency graph such as slot collisions.
8483                 This is called internally by display() to show the problems _after_
8484                 the merge list where it is most likely to be seen, but if display()
8485                 is not going to be called then this method should be called explicitly
8486                 to ensure that the user is notified of problems with the graph.
8487
8488                 All output goes to stderr, except for unsatisfied dependencies which
8489                 go to stdout for parsing by programs such as autounmask.
8490                 """
8491
8492                 # Note that show_masked_packages() sends it's output to
8493                 # stdout, and some programs such as autounmask parse the
8494                 # output in cases when emerge bails out. However, when
8495                 # show_masked_packages() is called for installed packages
8496                 # here, the message is a warning that is more appropriate
8497                 # to send to stderr, so temporarily redirect stdout to
8498                 # stderr. TODO: Fix output code so there's a cleaner way
8499                 # to redirect everything to stderr.
8500                 sys.stdout.flush()
8501                 sys.stderr.flush()
8502                 stdout = sys.stdout
8503                 try:
8504                         sys.stdout = sys.stderr
8505                         self._display_problems()
8506                 finally:
8507                         sys.stdout = stdout
8508                         sys.stdout.flush()
8509                         sys.stderr.flush()
8510
8511                 # This goes to stdout for parsing by programs like autounmask.
8512                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8513                         self._show_unsatisfied_dep(*pargs, **kwargs)
8514
8515         def _display_problems(self):
8516                 if self._circular_deps_for_display is not None:
8517                         self._show_circular_deps(
8518                                 self._circular_deps_for_display)
8519
8520                 # The user is only notified of a slot conflict if
8521                 # there are no unresolvable blocker conflicts.
8522                 if self._unsatisfied_blockers_for_display is not None:
8523                         self._show_unsatisfied_blockers(
8524                                 self._unsatisfied_blockers_for_display)
8525                 else:
8526                         self._show_slot_collision_notice()
8527
8528                 # TODO: Add generic support for "set problem" handlers so that
8529                 # the below warnings aren't special cases for world only.
8530
8531                 if self._missing_args:
8532                         world_problems = False
8533                         if "world" in self._sets:
8534                                 # Filter out indirect members of world (from nested sets)
8535                                 # since only direct members of world are desired here.
8536                                 world_set = self.roots[self.target_root].sets["world"]
8537                                 for arg, atom in self._missing_args:
8538                                         if arg.name == "world" and atom in world_set:
8539                                                 world_problems = True
8540                                                 break
8541
8542                         if world_problems:
8543                                 sys.stderr.write("\n!!! Problems have been " + \
8544                                         "detected with your world file\n")
8545                                 sys.stderr.write("!!! Please run " + \
8546                                         green("emaint --check world")+"\n\n")
8547
8548                 if self._missing_args:
8549                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8550                                 " Ebuilds for the following packages are either all\n")
8551                         sys.stderr.write(colorize("BAD", "!!!") + \
8552                                 " masked or don't exist:\n")
8553                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8554                                 self._missing_args) + "\n")
8555
8556                 if self._pprovided_args:
8557                         arg_refs = {}
8558                         for arg, atom in self._pprovided_args:
8559                                 if isinstance(arg, SetArg):
8560                                         parent = arg.name
8561                                         arg_atom = (atom, atom)
8562                                 else:
8563                                         parent = "args"
8564                                         arg_atom = (arg.arg, atom)
8565                                 refs = arg_refs.setdefault(arg_atom, [])
8566                                 if parent not in refs:
8567                                         refs.append(parent)
8568                         msg = []
8569                         msg.append(bad("\nWARNING: "))
8570                         if len(self._pprovided_args) > 1:
8571                                 msg.append("Requested packages will not be " + \
8572                                         "merged because they are listed in\n")
8573                         else:
8574                                 msg.append("A requested package will not be " + \
8575                                         "merged because it is listed in\n")
8576                         msg.append("package.provided:\n\n")
8577                         problems_sets = set()
8578                         for (arg, atom), refs in arg_refs.iteritems():
8579                                 ref_string = ""
8580                                 if refs:
8581                                         problems_sets.update(refs)
8582                                         refs.sort()
8583                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8584                                         ref_string = " pulled in by " + ref_string
8585                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8586                         msg.append("\n")
8587                         if "world" in problems_sets:
8588                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8589                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8590                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8591                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8592                                 msg.append("The best course of action depends on the reason that an offending\n")
8593                                 msg.append("package.provided entry exists.\n\n")
8594                         sys.stderr.write("".join(msg))
8595
8596                 masked_packages = []
8597                 for pkg in self._masked_installed:
8598                         root_config = pkg.root_config
8599                         pkgsettings = self.pkgsettings[pkg.root]
8600                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8601                         masked_packages.append((root_config, pkgsettings,
8602                                 pkg.cpv, pkg.metadata, mreasons))
8603                 if masked_packages:
8604                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8605                                 " The following installed packages are masked:\n")
8606                         show_masked_packages(masked_packages)
8607                         show_mask_docs()
8608                         print
8609
8610         def calc_changelog(self,ebuildpath,current,next):
8611                 if ebuildpath == None or not os.path.exists(ebuildpath):
8612                         return []
8613                 current = '-'.join(portage.catpkgsplit(current)[1:])
8614                 if current.endswith('-r0'):
8615                         current = current[:-3]
8616                 next = '-'.join(portage.catpkgsplit(next)[1:])
8617                 if next.endswith('-r0'):
8618                         next = next[:-3]
8619                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8620                 try:
8621                         changelog = open(changelogpath).read()
8622                 except SystemExit, e:
8623                         raise # Needed else can't exit
8624                 except:
8625                         return []
8626                 divisions = self.find_changelog_tags(changelog)
8627                 #print 'XX from',current,'to',next
8628                 #for div,text in divisions: print 'XX',div
8629                 # skip entries for all revisions above the one we are about to emerge
8630                 for i in range(len(divisions)):
8631                         if divisions[i][0]==next:
8632                                 divisions = divisions[i:]
8633                                 break
8634                 # find out how many entries we are going to display
8635                 for i in range(len(divisions)):
8636                         if divisions[i][0]==current:
8637                                 divisions = divisions[:i]
8638                                 break
8639                 else:
8640                     # couldnt find the current revision in the list. display nothing
8641                         return []
8642                 return divisions
8643
8644         def find_changelog_tags(self,changelog):
8645                 divs = []
8646                 release = None
8647                 while 1:
8648                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8649                         if match is None:
8650                                 if release is not None:
8651                                         divs.append((release,changelog))
8652                                 return divs
8653                         if release is not None:
8654                                 divs.append((release,changelog[:match.start()]))
8655                         changelog = changelog[match.end():]
8656                         release = match.group(1)
8657                         if release.endswith('.ebuild'):
8658                                 release = release[:-7]
8659                         if release.endswith('-r0'):
8660                                 release = release[:-3]
8661
8662         def saveNomergeFavorites(self):
8663                 """Find atoms in favorites that are not in the mergelist and add them
8664                 to the world file if necessary."""
8665                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8666                         "--oneshot", "--onlydeps", "--pretend"):
8667                         if x in self.myopts:
8668                                 return
8669                 root_config = self.roots[self.target_root]
8670                 world_set = root_config.sets["world"]
8671
8672                 world_locked = False
8673                 if hasattr(world_set, "lock"):
8674                         world_set.lock()
8675                         world_locked = True
8676
8677                 if hasattr(world_set, "load"):
8678                         world_set.load() # maybe it's changed on disk
8679
8680                 args_set = self._sets["args"]
8681                 portdb = self.trees[self.target_root]["porttree"].dbapi
8682                 added_favorites = set()
8683                 for x in self._set_nodes:
8684                         pkg_type, root, pkg_key, pkg_status = x
8685                         if pkg_status != "nomerge":
8686                                 continue
8687
8688                         try:
8689                                 myfavkey = create_world_atom(x, args_set, root_config)
8690                                 if myfavkey:
8691                                         if myfavkey in added_favorites:
8692                                                 continue
8693                                         added_favorites.add(myfavkey)
8694                         except portage.exception.InvalidDependString, e:
8695                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8696                                         (pkg_key, str(e)), noiselevel=-1)
8697                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8698                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8699                                 del e
8700                 all_added = []
8701                 for k in self._sets:
8702                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8703                                 continue
8704                         s = SETPREFIX + k
8705                         if s in world_set:
8706                                 continue
8707                         all_added.append(SETPREFIX + k)
8708                 all_added.extend(added_favorites)
8709                 all_added.sort()
8710                 for a in all_added:
8711                         print ">>> Recording %s in \"world\" favorites file..." % \
8712                                 colorize("INFORM", str(a))
8713                 if all_added:
8714                         world_set.update(all_added)
8715
8716                 if world_locked:
8717                         world_set.unlock()
8718
8719         def loadResumeCommand(self, resume_data, skip_masked=False):
8720                 """
8721                 Add a resume command to the graph and validate it in the process.  This
8722                 will raise a PackageNotFound exception if a package is not available.
8723                 """
8724
8725                 if not isinstance(resume_data, dict):
8726                         return False
8727
8728                 mergelist = resume_data.get("mergelist")
8729                 if not isinstance(mergelist, list):
8730                         mergelist = []
8731
8732                 fakedb = self.mydbapi
8733                 trees = self.trees
8734                 serialized_tasks = []
8735                 masked_tasks = []
8736                 for x in mergelist:
8737                         if not (isinstance(x, list) and len(x) == 4):
8738                                 continue
8739                         pkg_type, myroot, pkg_key, action = x
8740                         if pkg_type not in self.pkg_tree_map:
8741                                 continue
8742                         if action != "merge":
8743                                 continue
8744                         tree_type = self.pkg_tree_map[pkg_type]
8745                         mydb = trees[myroot][tree_type].dbapi
8746                         db_keys = list(self._trees_orig[myroot][
8747                                 tree_type].dbapi._aux_cache_keys)
8748                         try:
8749                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8750                         except KeyError:
8751                                 # It does no exist or it is corrupt.
8752                                 if action == "uninstall":
8753                                         continue
8754                                 raise portage.exception.PackageNotFound(pkg_key)
8755                         installed = action == "uninstall"
8756                         built = pkg_type != "ebuild"
8757                         root_config = self.roots[myroot]
8758                         pkg = Package(built=built, cpv=pkg_key,
8759                                 installed=installed, metadata=metadata,
8760                                 operation=action, root_config=root_config,
8761                                 type_name=pkg_type)
8762                         if pkg_type == "ebuild":
8763                                 pkgsettings = self.pkgsettings[myroot]
8764                                 pkgsettings.setcpv(pkg)
8765                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8766                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8767                         self._pkg_cache[pkg] = pkg
8768
8769                         root_config = self.roots[pkg.root]
8770                         if "merge" == pkg.operation and \
8771                                 not visible(root_config.settings, pkg):
8772                                 if skip_masked:
8773                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8774                                 else:
8775                                         self._unsatisfied_deps_for_display.append(
8776                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8777
8778                         fakedb[myroot].cpv_inject(pkg)
8779                         serialized_tasks.append(pkg)
8780                         self.spinner.update()
8781
8782                 if self._unsatisfied_deps_for_display:
8783                         return False
8784
8785                 if not serialized_tasks or "--nodeps" in self.myopts:
8786                         self._serialized_tasks_cache = serialized_tasks
8787                         self._scheduler_graph = self.digraph
8788                 else:
8789                         self._select_package = self._select_pkg_from_graph
8790                         self.myparams.add("selective")
8791                         # Always traverse deep dependencies in order to account for
8792                         # potentially unsatisfied dependencies of installed packages.
8793                         # This is necessary for correct --keep-going or --resume operation
8794                         # in case a package from a group of circularly dependent packages
8795                         # fails. In this case, a package which has recently been installed
8796                         # may have an unsatisfied circular dependency (pulled in by
8797                         # PDEPEND, for example). So, even though a package is already
8798                         # installed, it may not have all of it's dependencies satisfied, so
8799                         # it may not be usable. If such a package is in the subgraph of
8800                         # deep depenedencies of a scheduled build, that build needs to
8801                         # be cancelled. In order for this type of situation to be
8802                         # recognized, deep traversal of dependencies is required.
8803                         self.myparams.add("deep")
8804
8805                         favorites = resume_data.get("favorites")
8806                         args_set = self._sets["args"]
8807                         if isinstance(favorites, list):
8808                                 args = self._load_favorites(favorites)
8809                         else:
8810                                 args = []
8811
8812                         for task in serialized_tasks:
8813                                 if isinstance(task, Package) and \
8814                                         task.operation == "merge":
8815                                         if not self._add_pkg(task, None):
8816                                                 return False
8817
8818                         # Packages for argument atoms need to be explicitly
8819                         # added via _add_pkg() so that they are included in the
8820                         # digraph (needed at least for --tree display).
8821                         for arg in args:
8822                                 for atom in arg.set:
8823                                         pkg, existing_node = self._select_package(
8824                                                 arg.root_config.root, atom)
8825                                         if existing_node is None and \
8826                                                 pkg is not None:
8827                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8828                                                         root=pkg.root, parent=arg)):
8829                                                         return False
8830
8831                         # Allow unsatisfied deps here to avoid showing a masking
8832                         # message for an unsatisfied dep that isn't necessarily
8833                         # masked.
8834                         if not self._create_graph(allow_unsatisfied=True):
8835                                 return False
8836
8837                         unsatisfied_deps = []
8838                         for dep in self._unsatisfied_deps:
8839                                 if not isinstance(dep.parent, Package):
8840                                         continue
8841                                 if dep.parent.operation == "merge":
8842                                         unsatisfied_deps.append(dep)
8843                                         continue
8844
8845                                 # For unsatisfied deps of installed packages, only account for
8846                                 # them if they are in the subgraph of dependencies of a package
8847                                 # which is scheduled to be installed.
8848                                 unsatisfied_install = False
8849                                 traversed = set()
8850                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8851                                 while dep_stack:
8852                                         node = dep_stack.pop()
8853                                         if not isinstance(node, Package):
8854                                                 continue
8855                                         if node.operation == "merge":
8856                                                 unsatisfied_install = True
8857                                                 break
8858                                         if node in traversed:
8859                                                 continue
8860                                         traversed.add(node)
8861                                         dep_stack.extend(self.digraph.parent_nodes(node))
8862
8863                                 if unsatisfied_install:
8864                                         unsatisfied_deps.append(dep)
8865
8866                         if masked_tasks or unsatisfied_deps:
8867                                 # This probably means that a required package
8868                                 # was dropped via --skipfirst. It makes the
8869                                 # resume list invalid, so convert it to a
8870                                 # UnsatisfiedResumeDep exception.
8871                                 raise self.UnsatisfiedResumeDep(self,
8872                                         masked_tasks + unsatisfied_deps)
8873                         self._serialized_tasks_cache = None
8874                         try:
8875                                 self.altlist()
8876                         except self._unknown_internal_error:
8877                                 return False
8878
8879                 return True
8880
8881         def _load_favorites(self, favorites):
8882                 """
8883                 Use a list of favorites to resume state from a
8884                 previous select_files() call. This creates similar
8885                 DependencyArg instances to those that would have
8886                 been created by the original select_files() call.
8887                 This allows Package instances to be matched with
8888                 DependencyArg instances during graph creation.
8889                 """
8890                 root_config = self.roots[self.target_root]
8891                 getSetAtoms = root_config.setconfig.getSetAtoms
8892                 sets = root_config.sets
8893                 args = []
8894                 for x in favorites:
8895                         if not isinstance(x, basestring):
8896                                 continue
8897                         if x in ("system", "world"):
8898                                 x = SETPREFIX + x
8899                         if x.startswith(SETPREFIX):
8900                                 s = x[len(SETPREFIX):]
8901                                 if s not in sets:
8902                                         continue
8903                                 if s in self._sets:
8904                                         continue
8905                                 # Recursively expand sets so that containment tests in
8906                                 # self._get_parent_sets() properly match atoms in nested
8907                                 # sets (like if world contains system).
8908                                 expanded_set = InternalPackageSet(
8909                                         initial_atoms=getSetAtoms(s))
8910                                 self._sets[s] = expanded_set
8911                                 args.append(SetArg(arg=x, set=expanded_set,
8912                                         root_config=root_config))
8913                         else:
8914                                 if not portage.isvalidatom(x):
8915                                         continue
8916                                 args.append(AtomArg(arg=x, atom=x,
8917                                         root_config=root_config))
8918
8919                 self._set_args(args)
8920                 return args
8921
8922         class UnsatisfiedResumeDep(portage.exception.PortageException):
8923                 """
8924                 A dependency of a resume list is not installed. This
8925                 can occur when a required package is dropped from the
8926                 merge list via --skipfirst.
8927                 """
8928                 def __init__(self, depgraph, value):
8929                         portage.exception.PortageException.__init__(self, value)
8930                         self.depgraph = depgraph
8931
8932         class _internal_exception(portage.exception.PortageException):
8933                 def __init__(self, value=""):
8934                         portage.exception.PortageException.__init__(self, value)
8935
8936         class _unknown_internal_error(_internal_exception):
8937                 """
8938                 Used by the depgraph internally to terminate graph creation.
8939                 The specific reason for the failure should have been dumped
8940                 to stderr, unfortunately, the exact reason for the failure
8941                 may not be known.
8942                 """
8943
8944         class _serialize_tasks_retry(_internal_exception):
8945                 """
8946                 This is raised by the _serialize_tasks() method when it needs to
8947                 be called again for some reason. The only case that it's currently
8948                 used for is when neglected dependencies need to be added to the
8949                 graph in order to avoid making a potentially unsafe decision.
8950                 """
8951
8952         class _dep_check_composite_db(portage.dbapi):
8953                 """
8954                 A dbapi-like interface that is optimized for use in dep_check() calls.
8955                 This is built on top of the existing depgraph package selection logic.
8956                 Some packages that have been added to the graph may be masked from this
8957                 view in order to influence the atom preference selection that occurs
8958                 via dep_check().
8959                 """
8960                 def __init__(self, depgraph, root):
8961                         portage.dbapi.__init__(self)
8962                         self._depgraph = depgraph
8963                         self._root = root
8964                         self._match_cache = {}
8965                         self._cpv_pkg_map = {}
8966
8967                 def _clear_cache(self):
8968                         self._match_cache.clear()
8969                         self._cpv_pkg_map.clear()
8970
8971                 def match(self, atom):
8972                         ret = self._match_cache.get(atom)
8973                         if ret is not None:
8974                                 return ret[:]
8975                         orig_atom = atom
8976                         if "/" not in atom:
8977                                 atom = self._dep_expand(atom)
8978                         pkg, existing = self._depgraph._select_package(self._root, atom)
8979                         if not pkg:
8980                                 ret = []
8981                         else:
8982                                 # Return the highest available from select_package() as well as
8983                                 # any matching slots in the graph db.
8984                                 slots = set()
8985                                 slots.add(pkg.metadata["SLOT"])
8986                                 atom_cp = portage.dep_getkey(atom)
8987                                 if pkg.cp.startswith("virtual/"):
8988                                         # For new-style virtual lookahead that occurs inside
8989                                         # dep_check(), examine all slots. This is needed
8990                                         # so that newer slots will not unnecessarily be pulled in
8991                                         # when a satisfying lower slot is already installed. For
8992                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8993                                         # there's no need to pull in a newer slot to satisfy a
8994                                         # virtual/jdk dependency.
8995                                         for db, pkg_type, built, installed, db_keys in \
8996                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8997                                                 for cpv in db.match(atom):
8998                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8999                                                                 continue
9000                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9001                                 ret = []
9002                                 if self._visible(pkg):
9003                                         self._cpv_pkg_map[pkg.cpv] = pkg
9004                                         ret.append(pkg.cpv)
9005                                 slots.remove(pkg.metadata["SLOT"])
9006                                 while slots:
9007                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9008                                         pkg, existing = self._depgraph._select_package(
9009                                                 self._root, slot_atom)
9010                                         if not pkg:
9011                                                 continue
9012                                         if not self._visible(pkg):
9013                                                 continue
9014                                         self._cpv_pkg_map[pkg.cpv] = pkg
9015                                         ret.append(pkg.cpv)
9016                                 if ret:
9017                                         self._cpv_sort_ascending(ret)
9018                         self._match_cache[orig_atom] = ret
9019                         return ret[:]
9020
9021                 def _visible(self, pkg):
9022                         if pkg.installed and "selective" not in self._depgraph.myparams:
9023                                 try:
9024                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9025                                 except (StopIteration, portage.exception.InvalidDependString):
9026                                         arg = None
9027                                 if arg:
9028                                         return False
9029                         if pkg.installed:
9030                                 try:
9031                                         if not visible(
9032                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9033                                                 return False
9034                                 except portage.exception.InvalidDependString:
9035                                         pass
9036                         in_graph = self._depgraph._slot_pkg_map[
9037                                 self._root].get(pkg.slot_atom)
9038                         if in_graph is None:
9039                                 # Mask choices for packages which are not the highest visible
9040                                 # version within their slot (since they usually trigger slot
9041                                 # conflicts).
9042                                 highest_visible, in_graph = self._depgraph._select_package(
9043                                         self._root, pkg.slot_atom)
9044                                 if pkg != highest_visible:
9045                                         return False
9046                         elif in_graph != pkg:
9047                                 # Mask choices for packages that would trigger a slot
9048                                 # conflict with a previously selected package.
9049                                 return False
9050                         return True
9051
9052                 def _dep_expand(self, atom):
9053                         """
9054                         This is only needed for old installed packages that may
9055                         contain atoms that are not fully qualified with a specific
9056                         category. Emulate the cpv_expand() function that's used by
9057                         dbapi.match() in cases like this. If there are multiple
9058                         matches, it's often due to a new-style virtual that has
9059                         been added, so try to filter those out to avoid raising
9060                         a ValueError.
9061                         """
9062                         root_config = self._depgraph.roots[self._root]
9063                         orig_atom = atom
9064                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9065                         if len(expanded_atoms) > 1:
9066                                 non_virtual_atoms = []
9067                                 for x in expanded_atoms:
9068                                         if not portage.dep_getkey(x).startswith("virtual/"):
9069                                                 non_virtual_atoms.append(x)
9070                                 if len(non_virtual_atoms) == 1:
9071                                         expanded_atoms = non_virtual_atoms
9072                         if len(expanded_atoms) > 1:
9073                                 # compatible with portage.cpv_expand()
9074                                 raise portage.exception.AmbiguousPackageName(
9075                                         [portage.dep_getkey(x) for x in expanded_atoms])
9076                         if expanded_atoms:
9077                                 atom = expanded_atoms[0]
9078                         else:
9079                                 null_atom = insert_category_into_atom(atom, "null")
9080                                 null_cp = portage.dep_getkey(null_atom)
9081                                 cat, atom_pn = portage.catsplit(null_cp)
9082                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9083                                 if virts_p:
9084                                         # Allow the resolver to choose which virtual.
9085                                         atom = insert_category_into_atom(atom, "virtual")
9086                                 else:
9087                                         atom = insert_category_into_atom(atom, "null")
9088                         return atom
9089
9090                 def aux_get(self, cpv, wants):
9091                         metadata = self._cpv_pkg_map[cpv].metadata
9092                         return [metadata.get(x, "") for x in wants]
9093
9094 class RepoDisplay(object):
9095         def __init__(self, roots):
9096                 self._shown_repos = {}
9097                 self._unknown_repo = False
9098                 repo_paths = set()
9099                 for root_config in roots.itervalues():
9100                         portdir = root_config.settings.get("PORTDIR")
9101                         if portdir:
9102                                 repo_paths.add(portdir)
9103                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9104                         if overlays:
9105                                 repo_paths.update(overlays.split())
9106                 repo_paths = list(repo_paths)
9107                 self._repo_paths = repo_paths
9108                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9109                         for repo_path in repo_paths ]
9110
9111                 # pre-allocate index for PORTDIR so that it always has index 0.
9112                 for root_config in roots.itervalues():
9113                         portdb = root_config.trees["porttree"].dbapi
9114                         portdir = portdb.porttree_root
9115                         if portdir:
9116                                 self.repoStr(portdir)
9117
9118         def repoStr(self, repo_path_real):
9119                 real_index = -1
9120                 if repo_path_real:
9121                         real_index = self._repo_paths_real.index(repo_path_real)
9122                 if real_index == -1:
9123                         s = "?"
9124                         self._unknown_repo = True
9125                 else:
9126                         shown_repos = self._shown_repos
9127                         repo_paths = self._repo_paths
9128                         repo_path = repo_paths[real_index]
9129                         index = shown_repos.get(repo_path)
9130                         if index is None:
9131                                 index = len(shown_repos)
9132                                 shown_repos[repo_path] = index
9133                         s = str(index)
9134                 return s
9135
9136         def __str__(self):
9137                 output = []
9138                 shown_repos = self._shown_repos
9139                 unknown_repo = self._unknown_repo
9140                 if shown_repos or self._unknown_repo:
9141                         output.append("Portage tree and overlays:\n")
9142                 show_repo_paths = list(shown_repos)
9143                 for repo_path, repo_index in shown_repos.iteritems():
9144                         show_repo_paths[repo_index] = repo_path
9145                 if show_repo_paths:
9146                         for index, repo_path in enumerate(show_repo_paths):
9147                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9148                 if unknown_repo:
9149                         output.append(" "+teal("[?]") + \
9150                                 " indicates that the source repository could not be determined\n")
9151                 return "".join(output)
9152
9153 class PackageCounters(object):
9154
9155         def __init__(self):
9156                 self.upgrades   = 0
9157                 self.downgrades = 0
9158                 self.new        = 0
9159                 self.newslot    = 0
9160                 self.reinst     = 0
9161                 self.uninst     = 0
9162                 self.blocks     = 0
9163                 self.blocks_satisfied         = 0
9164                 self.totalsize  = 0
9165                 self.restrict_fetch           = 0
9166                 self.restrict_fetch_satisfied = 0
9167                 self.interactive              = 0
9168
9169         def __str__(self):
9170                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9171                 myoutput = []
9172                 details = []
9173                 myoutput.append("Total: %s package" % total_installs)
9174                 if total_installs != 1:
9175                         myoutput.append("s")
9176                 if total_installs != 0:
9177                         myoutput.append(" (")
9178                 if self.upgrades > 0:
9179                         details.append("%s upgrade" % self.upgrades)
9180                         if self.upgrades > 1:
9181                                 details[-1] += "s"
9182                 if self.downgrades > 0:
9183                         details.append("%s downgrade" % self.downgrades)
9184                         if self.downgrades > 1:
9185                                 details[-1] += "s"
9186                 if self.new > 0:
9187                         details.append("%s new" % self.new)
9188                 if self.newslot > 0:
9189                         details.append("%s in new slot" % self.newslot)
9190                         if self.newslot > 1:
9191                                 details[-1] += "s"
9192                 if self.reinst > 0:
9193                         details.append("%s reinstall" % self.reinst)
9194                         if self.reinst > 1:
9195                                 details[-1] += "s"
9196                 if self.uninst > 0:
9197                         details.append("%s uninstall" % self.uninst)
9198                         if self.uninst > 1:
9199                                 details[-1] += "s"
9200                 if self.interactive > 0:
9201                         details.append("%s %s" % (self.interactive,
9202                                 colorize("WARN", "interactive")))
9203                 myoutput.append(", ".join(details))
9204                 if total_installs != 0:
9205                         myoutput.append(")")
9206                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9207                 if self.restrict_fetch:
9208                         myoutput.append("\nFetch Restriction: %s package" % \
9209                                 self.restrict_fetch)
9210                         if self.restrict_fetch > 1:
9211                                 myoutput.append("s")
9212                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9213                         myoutput.append(bad(" (%s unsatisfied)") % \
9214                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9215                 if self.blocks > 0:
9216                         myoutput.append("\nConflict: %s block" % \
9217                                 self.blocks)
9218                         if self.blocks > 1:
9219                                 myoutput.append("s")
9220                         if self.blocks_satisfied < self.blocks:
9221                                 myoutput.append(bad(" (%s unsatisfied)") % \
9222                                         (self.blocks - self.blocks_satisfied))
9223                 return "".join(myoutput)
9224
9225 class PollSelectAdapter(PollConstants):
9226
9227         """
9228         Use select to emulate a poll object, for
9229         systems that don't support poll().
9230         """
9231
9232         def __init__(self):
9233                 self._registered = {}
9234                 self._select_args = [[], [], []]
9235
9236         def register(self, fd, *args):
9237                 """
9238                 Only POLLIN is currently supported!
9239                 """
9240                 if len(args) > 1:
9241                         raise TypeError(
9242                                 "register expected at most 2 arguments, got " + \
9243                                 repr(1 + len(args)))
9244
9245                 eventmask = PollConstants.POLLIN | \
9246                         PollConstants.POLLPRI | PollConstants.POLLOUT
9247                 if args:
9248                         eventmask = args[0]
9249
9250                 self._registered[fd] = eventmask
9251                 self._select_args = None
9252
9253         def unregister(self, fd):
9254                 self._select_args = None
9255                 del self._registered[fd]
9256
9257         def poll(self, *args):
9258                 if len(args) > 1:
9259                         raise TypeError(
9260                                 "poll expected at most 2 arguments, got " + \
9261                                 repr(1 + len(args)))
9262
9263                 timeout = None
9264                 if args:
9265                         timeout = args[0]
9266
9267                 select_args = self._select_args
9268                 if select_args is None:
9269                         select_args = [self._registered.keys(), [], []]
9270
9271                 if timeout is not None:
9272                         select_args = select_args[:]
9273                         # Translate poll() timeout args to select() timeout args:
9274                         #
9275                         #          | units        | value(s) for indefinite block
9276                         # ---------|--------------|------------------------------
9277                         #   poll   | milliseconds | omitted, negative, or None
9278                         # ---------|--------------|------------------------------
9279                         #   select | seconds      | omitted
9280                         # ---------|--------------|------------------------------
9281
9282                         if timeout is not None and timeout < 0:
9283                                 timeout = None
9284                         if timeout is not None:
9285                                 select_args.append(timeout / 1000)
9286
9287                 select_events = select.select(*select_args)
9288                 poll_events = []
9289                 for fd in select_events[0]:
9290                         poll_events.append((fd, PollConstants.POLLIN))
9291                 return poll_events
9292
9293 class SequentialTaskQueue(SlotObject):
9294
9295         __slots__ = ("max_jobs", "running_tasks") + \
9296                 ("_dirty", "_scheduling", "_task_queue")
9297
9298         def __init__(self, **kwargs):
9299                 SlotObject.__init__(self, **kwargs)
9300                 self._task_queue = deque()
9301                 self.running_tasks = set()
9302                 if self.max_jobs is None:
9303                         self.max_jobs = 1
9304                 self._dirty = True
9305
9306         def add(self, task):
9307                 self._task_queue.append(task)
9308                 self._dirty = True
9309
9310         def addFront(self, task):
9311                 self._task_queue.appendleft(task)
9312                 self._dirty = True
9313
9314         def schedule(self):
9315
9316                 if not self._dirty:
9317                         return False
9318
9319                 if not self:
9320                         return False
9321
9322                 if self._scheduling:
9323                         # Ignore any recursive schedule() calls triggered via
9324                         # self._task_exit().
9325                         return False
9326
9327                 self._scheduling = True
9328
9329                 task_queue = self._task_queue
9330                 running_tasks = self.running_tasks
9331                 max_jobs = self.max_jobs
9332                 state_changed = False
9333
9334                 while task_queue and \
9335                         (max_jobs is True or len(running_tasks) < max_jobs):
9336                         task = task_queue.popleft()
9337                         cancelled = getattr(task, "cancelled", None)
9338                         if not cancelled:
9339                                 running_tasks.add(task)
9340                                 task.addExitListener(self._task_exit)
9341                                 task.start()
9342                         state_changed = True
9343
9344                 self._dirty = False
9345                 self._scheduling = False
9346
9347                 return state_changed
9348
9349         def _task_exit(self, task):
9350                 """
9351                 Since we can always rely on exit listeners being called, the set of
9352                 running tasks is always pruned automatically and there is never any need
9353                 to actively prune it.
9354                 """
9355                 self.running_tasks.remove(task)
9356                 if self._task_queue:
9357                         self._dirty = True
9358
9359         def clear(self):
9360                 self._task_queue.clear()
9361                 running_tasks = self.running_tasks
9362                 while running_tasks:
9363                         task = running_tasks.pop()
9364                         task.removeExitListener(self._task_exit)
9365                         task.cancel()
9366                 self._dirty = False
9367
9368         def __nonzero__(self):
9369                 return bool(self._task_queue or self.running_tasks)
9370
9371         def __len__(self):
9372                 return len(self._task_queue) + len(self.running_tasks)
9373
9374 _can_poll_device = None
9375
9376 def can_poll_device():
9377         """
9378         Test if it's possible to use poll() on a device such as a pty. This
9379         is known to fail on Darwin.
9380         @rtype: bool
9381         @returns: True if poll() on a device succeeds, False otherwise.
9382         """
9383
9384         global _can_poll_device
9385         if _can_poll_device is not None:
9386                 return _can_poll_device
9387
9388         if not hasattr(select, "poll"):
9389                 _can_poll_device = False
9390                 return _can_poll_device
9391
9392         try:
9393                 dev_null = open('/dev/null', 'rb')
9394         except IOError:
9395                 _can_poll_device = False
9396                 return _can_poll_device
9397
9398         p = select.poll()
9399         p.register(dev_null.fileno(), PollConstants.POLLIN)
9400
9401         invalid_request = False
9402         for f, event in p.poll():
9403                 if event & PollConstants.POLLNVAL:
9404                         invalid_request = True
9405                         break
9406         dev_null.close()
9407
9408         _can_poll_device = not invalid_request
9409         return _can_poll_device
9410
9411 def create_poll_instance():
9412         """
9413         Create an instance of select.poll, or an instance of
9414         PollSelectAdapter there is no poll() implementation or
9415         it is broken somehow.
9416         """
9417         if can_poll_device():
9418                 return select.poll()
9419         return PollSelectAdapter()
9420
9421 getloadavg = getattr(os, "getloadavg", None)
9422 if getloadavg is None:
9423         def getloadavg():
9424                 """
9425                 Uses /proc/loadavg to emulate os.getloadavg().
9426                 Raises OSError if the load average was unobtainable.
9427                 """
9428                 try:
9429                         loadavg_str = open('/proc/loadavg').readline()
9430                 except IOError:
9431                         # getloadavg() is only supposed to raise OSError, so convert
9432                         raise OSError('unknown')
9433                 loadavg_split = loadavg_str.split()
9434                 if len(loadavg_split) < 3:
9435                         raise OSError('unknown')
9436                 loadavg_floats = []
9437                 for i in xrange(3):
9438                         try:
9439                                 loadavg_floats.append(float(loadavg_split[i]))
9440                         except ValueError:
9441                                 raise OSError('unknown')
9442                 return tuple(loadavg_floats)
9443
9444 class PollScheduler(object):
9445
9446         class _sched_iface_class(SlotObject):
9447                 __slots__ = ("register", "schedule", "unregister")
9448
9449         def __init__(self):
9450                 self._max_jobs = 1
9451                 self._max_load = None
9452                 self._jobs = 0
9453                 self._poll_event_queue = []
9454                 self._poll_event_handlers = {}
9455                 self._poll_event_handler_ids = {}
9456                 # Increment id for each new handler.
9457                 self._event_handler_id = 0
9458                 self._poll_obj = create_poll_instance()
9459                 self._scheduling = False
9460
9461         def _schedule(self):
9462                 """
9463                 Calls _schedule_tasks() and automatically returns early from
9464                 any recursive calls to this method that the _schedule_tasks()
9465                 call might trigger. This makes _schedule() safe to call from
9466                 inside exit listeners.
9467                 """
9468                 if self._scheduling:
9469                         return False
9470                 self._scheduling = True
9471                 try:
9472                         return self._schedule_tasks()
9473                 finally:
9474                         self._scheduling = False
9475
9476         def _running_job_count(self):
9477                 return self._jobs
9478
9479         def _can_add_job(self):
9480                 max_jobs = self._max_jobs
9481                 max_load = self._max_load
9482
9483                 if self._max_jobs is not True and \
9484                         self._running_job_count() >= self._max_jobs:
9485                         return False
9486
9487                 if max_load is not None and \
9488                         (max_jobs is True or max_jobs > 1) and \
9489                         self._running_job_count() >= 1:
9490                         try:
9491                                 avg1, avg5, avg15 = getloadavg()
9492                         except OSError:
9493                                 return False
9494
9495                         if avg1 >= max_load:
9496                                 return False
9497
9498                 return True
9499
9500         def _poll(self, timeout=None):
9501                 """
9502                 All poll() calls pass through here. The poll events
9503                 are added directly to self._poll_event_queue.
9504                 In order to avoid endless blocking, this raises
9505                 StopIteration if timeout is None and there are
9506                 no file descriptors to poll.
9507                 """
9508                 if not self._poll_event_handlers:
9509                         self._schedule()
9510                         if timeout is None and \
9511                                 not self._poll_event_handlers:
9512                                 raise StopIteration(
9513                                         "timeout is None and there are no poll() event handlers")
9514
9515                 # The following error is known to occur with Linux kernel versions
9516                 # less than 2.6.24:
9517                 #
9518                 #   select.error: (4, 'Interrupted system call')
9519                 #
9520                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9521                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9522                 # without any events.
9523                 while True:
9524                         try:
9525                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9526                                 break
9527                         except select.error, e:
9528                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9529                                         level=logging.ERROR, noiselevel=-1)
9530                                 del e
9531                                 if timeout is not None:
9532                                         break
9533
9534         def _next_poll_event(self, timeout=None):
9535                 """
9536                 Since the _schedule_wait() loop is called by event
9537                 handlers from _poll_loop(), maintain a central event
9538                 queue for both of them to share events from a single
9539                 poll() call. In order to avoid endless blocking, this
9540                 raises StopIteration if timeout is None and there are
9541                 no file descriptors to poll.
9542                 """
9543                 if not self._poll_event_queue:
9544                         self._poll(timeout)
9545                 return self._poll_event_queue.pop()
9546
9547         def _poll_loop(self):
9548
9549                 event_handlers = self._poll_event_handlers
9550                 event_handled = False
9551
9552                 try:
9553                         while event_handlers:
9554                                 f, event = self._next_poll_event()
9555                                 handler, reg_id = event_handlers[f]
9556                                 handler(f, event)
9557                                 event_handled = True
9558                 except StopIteration:
9559                         event_handled = True
9560
9561                 if not event_handled:
9562                         raise AssertionError("tight loop")
9563
9564         def _schedule_yield(self):
9565                 """
9566                 Schedule for a short period of time chosen by the scheduler based
9567                 on internal state. Synchronous tasks should call this periodically
9568                 in order to allow the scheduler to service pending poll events. The
9569                 scheduler will call poll() exactly once, without blocking, and any
9570                 resulting poll events will be serviced.
9571                 """
9572                 event_handlers = self._poll_event_handlers
9573                 events_handled = 0
9574
9575                 if not event_handlers:
9576                         return bool(events_handled)
9577
9578                 if not self._poll_event_queue:
9579                         self._poll(0)
9580
9581                 try:
9582                         while event_handlers and self._poll_event_queue:
9583                                 f, event = self._next_poll_event()
9584                                 handler, reg_id = event_handlers[f]
9585                                 handler(f, event)
9586                                 events_handled += 1
9587                 except StopIteration:
9588                         events_handled += 1
9589
9590                 return bool(events_handled)
9591
9592         def _register(self, f, eventmask, handler):
9593                 """
9594                 @rtype: Integer
9595                 @return: A unique registration id, for use in schedule() or
9596                         unregister() calls.
9597                 """
9598                 if f in self._poll_event_handlers:
9599                         raise AssertionError("fd %d is already registered" % f)
9600                 self._event_handler_id += 1
9601                 reg_id = self._event_handler_id
9602                 self._poll_event_handler_ids[reg_id] = f
9603                 self._poll_event_handlers[f] = (handler, reg_id)
9604                 self._poll_obj.register(f, eventmask)
9605                 return reg_id
9606
9607         def _unregister(self, reg_id):
9608                 f = self._poll_event_handler_ids[reg_id]
9609                 self._poll_obj.unregister(f)
9610                 del self._poll_event_handlers[f]
9611                 del self._poll_event_handler_ids[reg_id]
9612
9613         def _schedule_wait(self, wait_ids):
9614                 """
9615                 Schedule until wait_id is not longer registered
9616                 for poll() events.
9617                 @type wait_id: int
9618                 @param wait_id: a task id to wait for
9619                 """
9620                 event_handlers = self._poll_event_handlers
9621                 handler_ids = self._poll_event_handler_ids
9622                 event_handled = False
9623
9624                 if isinstance(wait_ids, int):
9625                         wait_ids = frozenset([wait_ids])
9626
9627                 try:
9628                         while wait_ids.intersection(handler_ids):
9629                                 f, event = self._next_poll_event()
9630                                 handler, reg_id = event_handlers[f]
9631                                 handler(f, event)
9632                                 event_handled = True
9633                 except StopIteration:
9634                         event_handled = True
9635
9636                 return event_handled
9637
9638 class QueueScheduler(PollScheduler):
9639
9640         """
9641         Add instances of SequentialTaskQueue and then call run(). The
9642         run() method returns when no tasks remain.
9643         """
9644
9645         def __init__(self, max_jobs=None, max_load=None):
9646                 PollScheduler.__init__(self)
9647
9648                 if max_jobs is None:
9649                         max_jobs = 1
9650
9651                 self._max_jobs = max_jobs
9652                 self._max_load = max_load
9653                 self.sched_iface = self._sched_iface_class(
9654                         register=self._register,
9655                         schedule=self._schedule_wait,
9656                         unregister=self._unregister)
9657
9658                 self._queues = []
9659                 self._schedule_listeners = []
9660
9661         def add(self, q):
9662                 self._queues.append(q)
9663
9664         def remove(self, q):
9665                 self._queues.remove(q)
9666
9667         def run(self):
9668
9669                 while self._schedule():
9670                         self._poll_loop()
9671
9672                 while self._running_job_count():
9673                         self._poll_loop()
9674
9675         def _schedule_tasks(self):
9676                 """
9677                 @rtype: bool
9678                 @returns: True if there may be remaining tasks to schedule,
9679                         False otherwise.
9680                 """
9681                 while self._can_add_job():
9682                         n = self._max_jobs - self._running_job_count()
9683                         if n < 1:
9684                                 break
9685
9686                         if not self._start_next_job(n):
9687                                 return False
9688
9689                 for q in self._queues:
9690                         if q:
9691                                 return True
9692                 return False
9693
9694         def _running_job_count(self):
9695                 job_count = 0
9696                 for q in self._queues:
9697                         job_count += len(q.running_tasks)
9698                 self._jobs = job_count
9699                 return job_count
9700
9701         def _start_next_job(self, n=1):
9702                 started_count = 0
9703                 for q in self._queues:
9704                         initial_job_count = len(q.running_tasks)
9705                         q.schedule()
9706                         final_job_count = len(q.running_tasks)
9707                         if final_job_count > initial_job_count:
9708                                 started_count += (final_job_count - initial_job_count)
9709                         if started_count >= n:
9710                                 break
9711                 return started_count
9712
9713 class TaskScheduler(object):
9714
9715         """
9716         A simple way to handle scheduling of AsynchrousTask instances. Simply
9717         add tasks and call run(). The run() method returns when no tasks remain.
9718         """
9719
9720         def __init__(self, max_jobs=None, max_load=None):
9721                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9722                 self._scheduler = QueueScheduler(
9723                         max_jobs=max_jobs, max_load=max_load)
9724                 self.sched_iface = self._scheduler.sched_iface
9725                 self.run = self._scheduler.run
9726                 self._scheduler.add(self._queue)
9727
9728         def add(self, task):
9729                 self._queue.add(task)
9730
9731 class JobStatusDisplay(object):
9732
9733         _bound_properties = ("curval", "failed", "running")
9734         _jobs_column_width = 48
9735
9736         # Don't update the display unless at least this much
9737         # time has passed, in units of seconds.
9738         _min_display_latency = 2
9739
9740         _default_term_codes = {
9741                 'cr'  : '\r',
9742                 'el'  : '\x1b[K',
9743                 'nel' : '\n',
9744         }
9745
9746         _termcap_name_map = {
9747                 'carriage_return' : 'cr',
9748                 'clr_eol'         : 'el',
9749                 'newline'         : 'nel',
9750         }
9751
9752         def __init__(self, out=sys.stdout, quiet=False):
9753                 object.__setattr__(self, "out", out)
9754                 object.__setattr__(self, "quiet", quiet)
9755                 object.__setattr__(self, "maxval", 0)
9756                 object.__setattr__(self, "merges", 0)
9757                 object.__setattr__(self, "_changed", False)
9758                 object.__setattr__(self, "_displayed", False)
9759                 object.__setattr__(self, "_last_display_time", 0)
9760                 object.__setattr__(self, "width", 80)
9761                 self.reset()
9762
9763                 isatty = hasattr(out, "isatty") and out.isatty()
9764                 object.__setattr__(self, "_isatty", isatty)
9765                 if not isatty or not self._init_term():
9766                         term_codes = {}
9767                         for k, capname in self._termcap_name_map.iteritems():
9768                                 term_codes[k] = self._default_term_codes[capname]
9769                         object.__setattr__(self, "_term_codes", term_codes)
9770                 encoding = sys.getdefaultencoding()
9771                 for k, v in self._term_codes.items():
9772                         if not isinstance(v, basestring):
9773                                 self._term_codes[k] = v.decode(encoding, 'replace')
9774
9775         def _init_term(self):
9776                 """
9777                 Initialize term control codes.
9778                 @rtype: bool
9779                 @returns: True if term codes were successfully initialized,
9780                         False otherwise.
9781                 """
9782
9783                 term_type = os.environ.get("TERM", "vt100")
9784                 tigetstr = None
9785
9786                 try:
9787                         import curses
9788                         try:
9789                                 curses.setupterm(term_type, self.out.fileno())
9790                                 tigetstr = curses.tigetstr
9791                         except curses.error:
9792                                 pass
9793                 except ImportError:
9794                         pass
9795
9796                 if tigetstr is None:
9797                         return False
9798
9799                 term_codes = {}
9800                 for k, capname in self._termcap_name_map.iteritems():
9801                         code = tigetstr(capname)
9802                         if code is None:
9803                                 code = self._default_term_codes[capname]
9804                         term_codes[k] = code
9805                 object.__setattr__(self, "_term_codes", term_codes)
9806                 return True
9807
9808         def _format_msg(self, msg):
9809                 return ">>> %s" % msg
9810
9811         def _erase(self):
9812                 self.out.write(
9813                         self._term_codes['carriage_return'] + \
9814                         self._term_codes['clr_eol'])
9815                 self.out.flush()
9816                 self._displayed = False
9817
9818         def _display(self, line):
9819                 self.out.write(line)
9820                 self.out.flush()
9821                 self._displayed = True
9822
9823         def _update(self, msg):
9824
9825                 out = self.out
9826                 if not self._isatty:
9827                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9828                         self.out.flush()
9829                         self._displayed = True
9830                         return
9831
9832                 if self._displayed:
9833                         self._erase()
9834
9835                 self._display(self._format_msg(msg))
9836
9837         def displayMessage(self, msg):
9838
9839                 was_displayed = self._displayed
9840
9841                 if self._isatty and self._displayed:
9842                         self._erase()
9843
9844                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9845                 self.out.flush()
9846                 self._displayed = False
9847
9848                 if was_displayed:
9849                         self._changed = True
9850                         self.display()
9851
9852         def reset(self):
9853                 self.maxval = 0
9854                 self.merges = 0
9855                 for name in self._bound_properties:
9856                         object.__setattr__(self, name, 0)
9857
9858                 if self._displayed:
9859                         self.out.write(self._term_codes['newline'])
9860                         self.out.flush()
9861                         self._displayed = False
9862
9863         def __setattr__(self, name, value):
9864                 old_value = getattr(self, name)
9865                 if value == old_value:
9866                         return
9867                 object.__setattr__(self, name, value)
9868                 if name in self._bound_properties:
9869                         self._property_change(name, old_value, value)
9870
9871         def _property_change(self, name, old_value, new_value):
9872                 self._changed = True
9873                 self.display()
9874
9875         def _load_avg_str(self):
9876                 try:
9877                         avg = getloadavg()
9878                 except OSError:
9879                         return 'unknown'
9880
9881                 max_avg = max(avg)
9882
9883                 if max_avg < 10:
9884                         digits = 2
9885                 elif max_avg < 100:
9886                         digits = 1
9887                 else:
9888                         digits = 0
9889
9890                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9891
9892         def display(self):
9893                 """
9894                 Display status on stdout, but only if something has
9895                 changed since the last call.
9896                 """
9897
9898                 if self.quiet:
9899                         return
9900
9901                 current_time = time.time()
9902                 time_delta = current_time - self._last_display_time
9903                 if self._displayed and \
9904                         not self._changed:
9905                         if not self._isatty:
9906                                 return
9907                         if time_delta < self._min_display_latency:
9908                                 return
9909
9910                 self._last_display_time = current_time
9911                 self._changed = False
9912                 self._display_status()
9913
9914         def _display_status(self):
9915                 # Don't use len(self._completed_tasks) here since that also
9916                 # can include uninstall tasks.
9917                 curval_str = str(self.curval)
9918                 maxval_str = str(self.maxval)
9919                 running_str = str(self.running)
9920                 failed_str = str(self.failed)
9921                 load_avg_str = self._load_avg_str()
9922
9923                 color_output = StringIO()
9924                 plain_output = StringIO()
9925                 style_file = portage.output.ConsoleStyleFile(color_output)
9926                 style_file.write_listener = plain_output
9927                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9928                 style_writer.style_listener = style_file.new_styles
9929                 f = formatter.AbstractFormatter(style_writer)
9930
9931                 number_style = "INFORM"
9932                 f.add_literal_data("Jobs: ")
9933                 f.push_style(number_style)
9934                 f.add_literal_data(curval_str)
9935                 f.pop_style()
9936                 f.add_literal_data(" of ")
9937                 f.push_style(number_style)
9938                 f.add_literal_data(maxval_str)
9939                 f.pop_style()
9940                 f.add_literal_data(" complete")
9941
9942                 if self.running:
9943                         f.add_literal_data(", ")
9944                         f.push_style(number_style)
9945                         f.add_literal_data(running_str)
9946                         f.pop_style()
9947                         f.add_literal_data(" running")
9948
9949                 if self.failed:
9950                         f.add_literal_data(", ")
9951                         f.push_style(number_style)
9952                         f.add_literal_data(failed_str)
9953                         f.pop_style()
9954                         f.add_literal_data(" failed")
9955
9956                 padding = self._jobs_column_width - len(plain_output.getvalue())
9957                 if padding > 0:
9958                         f.add_literal_data(padding * " ")
9959
9960                 f.add_literal_data("Load avg: ")
9961                 f.add_literal_data(load_avg_str)
9962
9963                 # Truncate to fit width, to avoid making the terminal scroll if the
9964                 # line overflows (happens when the load average is large).
9965                 plain_output = plain_output.getvalue()
9966                 if self._isatty and len(plain_output) > self.width:
9967                         # Use plain_output here since it's easier to truncate
9968                         # properly than the color output which contains console
9969                         # color codes.
9970                         self._update(plain_output[:self.width])
9971                 else:
9972                         self._update(color_output.getvalue())
9973
9974                 xtermTitle(" ".join(plain_output.split()))
9975
9976 class Scheduler(PollScheduler):
9977
9978         _opts_ignore_blockers = \
9979                 frozenset(["--buildpkgonly",
9980                 "--fetchonly", "--fetch-all-uri",
9981                 "--nodeps", "--pretend"])
9982
9983         _opts_no_background = \
9984                 frozenset(["--pretend",
9985                 "--fetchonly", "--fetch-all-uri"])
9986
9987         _opts_no_restart = frozenset(["--buildpkgonly",
9988                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9989
9990         _bad_resume_opts = set(["--ask", "--changelog",
9991                 "--resume", "--skipfirst"])
9992
9993         _fetch_log = "/var/log/emerge-fetch.log"
9994
9995         class _iface_class(SlotObject):
9996                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9997                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
9998                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9999                         "unregister")
10000
10001         class _fetch_iface_class(SlotObject):
10002                 __slots__ = ("log_file", "schedule")
10003
10004         _task_queues_class = slot_dict_class(
10005                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10006
10007         class _build_opts_class(SlotObject):
10008                 __slots__ = ("buildpkg", "buildpkgonly",
10009                         "fetch_all_uri", "fetchonly", "pretend")
10010
10011         class _binpkg_opts_class(SlotObject):
10012                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10013
10014         class _pkg_count_class(SlotObject):
10015                 __slots__ = ("curval", "maxval")
10016
10017         class _emerge_log_class(SlotObject):
10018                 __slots__ = ("xterm_titles",)
10019
10020                 def log(self, *pargs, **kwargs):
10021                         if not self.xterm_titles:
10022                                 # Avoid interference with the scheduler's status display.
10023                                 kwargs.pop("short_msg", None)
10024                         emergelog(self.xterm_titles, *pargs, **kwargs)
10025
10026         class _failed_pkg(SlotObject):
10027                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10028
10029         class _ConfigPool(object):
10030                 """Interface for a task to temporarily allocate a config
10031                 instance from a pool. This allows a task to be constructed
10032                 long before the config instance actually becomes needed, like
10033                 when prefetchers are constructed for the whole merge list."""
10034                 __slots__ = ("_root", "_allocate", "_deallocate")
10035                 def __init__(self, root, allocate, deallocate):
10036                         self._root = root
10037                         self._allocate = allocate
10038                         self._deallocate = deallocate
10039                 def allocate(self):
10040                         return self._allocate(self._root)
10041                 def deallocate(self, settings):
10042                         self._deallocate(settings)
10043
10044         class _unknown_internal_error(portage.exception.PortageException):
10045                 """
10046                 Used internally to terminate scheduling. The specific reason for
10047                 the failure should have been dumped to stderr.
10048                 """
10049                 def __init__(self, value=""):
10050                         portage.exception.PortageException.__init__(self, value)
10051
10052         def __init__(self, settings, trees, mtimedb, myopts,
10053                 spinner, mergelist, favorites, digraph):
10054                 PollScheduler.__init__(self)
10055                 self.settings = settings
10056                 self.target_root = settings["ROOT"]
10057                 self.trees = trees
10058                 self.myopts = myopts
10059                 self._spinner = spinner
10060                 self._mtimedb = mtimedb
10061                 self._mergelist = mergelist
10062                 self._favorites = favorites
10063                 self._args_set = InternalPackageSet(favorites)
10064                 self._build_opts = self._build_opts_class()
10065                 for k in self._build_opts.__slots__:
10066                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10067                 self._binpkg_opts = self._binpkg_opts_class()
10068                 for k in self._binpkg_opts.__slots__:
10069                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10070
10071                 self.curval = 0
10072                 self._logger = self._emerge_log_class()
10073                 self._task_queues = self._task_queues_class()
10074                 for k in self._task_queues.allowed_keys:
10075                         setattr(self._task_queues, k,
10076                                 SequentialTaskQueue())
10077
10078                 # Holds merges that will wait to be executed when no builds are
10079                 # executing. This is useful for system packages since dependencies
10080                 # on system packages are frequently unspecified.
10081                 self._merge_wait_queue = []
10082                 # Holds merges that have been transfered from the merge_wait_queue to
10083                 # the actual merge queue. They are removed from this list upon
10084                 # completion. Other packages can start building only when this list is
10085                 # empty.
10086                 self._merge_wait_scheduled = []
10087
10088                 # Holds system packages and their deep runtime dependencies. Before
10089                 # being merged, these packages go to merge_wait_queue, to be merged
10090                 # when no other packages are building.
10091                 self._deep_system_deps = set()
10092
10093                 # Holds packages to merge which will satisfy currently unsatisfied
10094                 # deep runtime dependencies of system packages. If this is not empty
10095                 # then no parallel builds will be spawned until it is empty. This
10096                 # minimizes the possibility that a build will fail due to the system
10097                 # being in a fragile state. For example, see bug #259954.
10098                 self._unsatisfied_system_deps = set()
10099
10100                 self._status_display = JobStatusDisplay()
10101                 self._max_load = myopts.get("--load-average")
10102                 max_jobs = myopts.get("--jobs")
10103                 if max_jobs is None:
10104                         max_jobs = 1
10105                 self._set_max_jobs(max_jobs)
10106
10107                 # The root where the currently running
10108                 # portage instance is installed.
10109                 self._running_root = trees["/"]["root_config"]
10110                 self.edebug = 0
10111                 if settings.get("PORTAGE_DEBUG", "") == "1":
10112                         self.edebug = 1
10113                 self.pkgsettings = {}
10114                 self._config_pool = {}
10115                 self._blocker_db = {}
10116                 for root in trees:
10117                         self._config_pool[root] = []
10118                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10119
10120                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10121                         schedule=self._schedule_fetch)
10122                 self._sched_iface = self._iface_class(
10123                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10124                         dblinkDisplayMerge=self._dblink_display_merge,
10125                         dblinkElog=self._dblink_elog,
10126                         dblinkEmergeLog=self._dblink_emerge_log,
10127                         fetch=fetch_iface, register=self._register,
10128                         schedule=self._schedule_wait,
10129                         scheduleSetup=self._schedule_setup,
10130                         scheduleUnpack=self._schedule_unpack,
10131                         scheduleYield=self._schedule_yield,
10132                         unregister=self._unregister)
10133
10134                 self._prefetchers = weakref.WeakValueDictionary()
10135                 self._pkg_queue = []
10136                 self._completed_tasks = set()
10137
10138                 self._failed_pkgs = []
10139                 self._failed_pkgs_all = []
10140                 self._failed_pkgs_die_msgs = []
10141                 self._post_mod_echo_msgs = []
10142                 self._parallel_fetch = False
10143                 merge_count = len([x for x in mergelist \
10144                         if isinstance(x, Package) and x.operation == "merge"])
10145                 self._pkg_count = self._pkg_count_class(
10146                         curval=0, maxval=merge_count)
10147                 self._status_display.maxval = self._pkg_count.maxval
10148
10149                 # The load average takes some time to respond when new
10150                 # jobs are added, so we need to limit the rate of adding
10151                 # new jobs.
10152                 self._job_delay_max = 10
10153                 self._job_delay_factor = 1.0
10154                 self._job_delay_exp = 1.5
10155                 self._previous_job_start_time = None
10156
10157                 self._set_digraph(digraph)
10158
10159                 # This is used to memoize the _choose_pkg() result when
10160                 # no packages can be chosen until one of the existing
10161                 # jobs completes.
10162                 self._choose_pkg_return_early = False
10163
10164                 features = self.settings.features
10165                 if "parallel-fetch" in features and \
10166                         not ("--pretend" in self.myopts or \
10167                         "--fetch-all-uri" in self.myopts or \
10168                         "--fetchonly" in self.myopts):
10169                         if "distlocks" not in features:
10170                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10171                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10172                                         "requires the distlocks feature enabled"+"\n",
10173                                         noiselevel=-1)
10174                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10175                                         "thus parallel-fetching is being disabled"+"\n",
10176                                         noiselevel=-1)
10177                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10178                         elif len(mergelist) > 1:
10179                                 self._parallel_fetch = True
10180
10181                 if self._parallel_fetch:
10182                                 # clear out existing fetch log if it exists
10183                                 try:
10184                                         open(self._fetch_log, 'w')
10185                                 except EnvironmentError:
10186                                         pass
10187
10188                 self._running_portage = None
10189                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10190                         portage.const.PORTAGE_PACKAGE_ATOM)
10191                 if portage_match:
10192                         cpv = portage_match.pop()
10193                         self._running_portage = self._pkg(cpv, "installed",
10194                                 self._running_root, installed=True)
10195
10196         def _poll(self, timeout=None):
10197                 self._schedule()
10198                 PollScheduler._poll(self, timeout=timeout)
10199
10200         def _set_max_jobs(self, max_jobs):
10201                 self._max_jobs = max_jobs
10202                 self._task_queues.jobs.max_jobs = max_jobs
10203
10204         def _background_mode(self):
10205                 """
10206                 Check if background mode is enabled and adjust states as necessary.
10207
10208                 @rtype: bool
10209                 @returns: True if background mode is enabled, False otherwise.
10210                 """
10211                 background = (self._max_jobs is True or \
10212                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10213                         not bool(self._opts_no_background.intersection(self.myopts))
10214
10215                 if background:
10216                         interactive_tasks = self._get_interactive_tasks()
10217                         if interactive_tasks:
10218                                 background = False
10219                                 writemsg_level(">>> Sending package output to stdio due " + \
10220                                         "to interactive package(s):\n",
10221                                         level=logging.INFO, noiselevel=-1)
10222                                 msg = [""]
10223                                 for pkg in interactive_tasks:
10224                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10225                                         if pkg.root != "/":
10226                                                 pkg_str += " for " + pkg.root
10227                                         msg.append(pkg_str)
10228                                 msg.append("")
10229                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10230                                         level=logging.INFO, noiselevel=-1)
10231                                 if self._max_jobs is True or self._max_jobs > 1:
10232                                         self._set_max_jobs(1)
10233                                         writemsg_level(">>> Setting --jobs=1 due " + \
10234                                                 "to the above interactive package(s)\n",
10235                                                 level=logging.INFO, noiselevel=-1)
10236
10237                 self._status_display.quiet = \
10238                         not background or \
10239                         ("--quiet" in self.myopts and \
10240                         "--verbose" not in self.myopts)
10241
10242                 self._logger.xterm_titles = \
10243                         "notitles" not in self.settings.features and \
10244                         self._status_display.quiet
10245
10246                 return background
10247
10248         def _get_interactive_tasks(self):
10249                 from portage import flatten
10250                 from portage.dep import use_reduce, paren_reduce
10251                 interactive_tasks = []
10252                 for task in self._mergelist:
10253                         if not (isinstance(task, Package) and \
10254                                 task.operation == "merge"):
10255                                 continue
10256                         try:
10257                                 properties = flatten(use_reduce(paren_reduce(
10258                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10259                         except portage.exception.InvalidDependString, e:
10260                                 show_invalid_depstring_notice(task,
10261                                         task.metadata["PROPERTIES"], str(e))
10262                                 raise self._unknown_internal_error()
10263                         if "interactive" in properties:
10264                                 interactive_tasks.append(task)
10265                 return interactive_tasks
10266
10267         def _set_digraph(self, digraph):
10268                 if "--nodeps" in self.myopts or \
10269                         (self._max_jobs is not True and self._max_jobs < 2):
10270                         # save some memory
10271                         self._digraph = None
10272                         return
10273
10274                 self._digraph = digraph
10275                 self._find_system_deps()
10276                 self._prune_digraph()
10277                 self._prevent_builddir_collisions()
10278
10279         def _find_system_deps(self):
10280                 """
10281                 Find system packages and their deep runtime dependencies. Before being
10282                 merged, these packages go to merge_wait_queue, to be merged when no
10283                 other packages are building.
10284                 """
10285                 deep_system_deps = self._deep_system_deps
10286                 deep_system_deps.clear()
10287                 deep_system_deps.update(
10288                         _find_deep_system_runtime_deps(self._digraph))
10289                 deep_system_deps.difference_update([pkg for pkg in \
10290                         deep_system_deps if pkg.operation != "merge"])
10291
10292         def _prune_digraph(self):
10293                 """
10294                 Prune any root nodes that are irrelevant.
10295                 """
10296
10297                 graph = self._digraph
10298                 completed_tasks = self._completed_tasks
10299                 removed_nodes = set()
10300                 while True:
10301                         for node in graph.root_nodes():
10302                                 if not isinstance(node, Package) or \
10303                                         (node.installed and node.operation == "nomerge") or \
10304                                         node.onlydeps or \
10305                                         node in completed_tasks:
10306                                         removed_nodes.add(node)
10307                         if removed_nodes:
10308                                 graph.difference_update(removed_nodes)
10309                         if not removed_nodes:
10310                                 break
10311                         removed_nodes.clear()
10312
10313         def _prevent_builddir_collisions(self):
10314                 """
10315                 When building stages, sometimes the same exact cpv needs to be merged
10316                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10317                 in the builddir. Currently, normal file locks would be inappropriate
10318                 for this purpose since emerge holds all of it's build dir locks from
10319                 the main process.
10320                 """
10321                 cpv_map = {}
10322                 for pkg in self._mergelist:
10323                         if not isinstance(pkg, Package):
10324                                 # a satisfied blocker
10325                                 continue
10326                         if pkg.installed:
10327                                 continue
10328                         if pkg.cpv not in cpv_map:
10329                                 cpv_map[pkg.cpv] = [pkg]
10330                                 continue
10331                         for earlier_pkg in cpv_map[pkg.cpv]:
10332                                 self._digraph.add(earlier_pkg, pkg,
10333                                         priority=DepPriority(buildtime=True))
10334                         cpv_map[pkg.cpv].append(pkg)
10335
10336         class _pkg_failure(portage.exception.PortageException):
10337                 """
10338                 An instance of this class is raised by unmerge() when
10339                 an uninstallation fails.
10340                 """
10341                 status = 1
10342                 def __init__(self, *pargs):
10343                         portage.exception.PortageException.__init__(self, pargs)
10344                         if pargs:
10345                                 self.status = pargs[0]
10346
10347         def _schedule_fetch(self, fetcher):
10348                 """
10349                 Schedule a fetcher on the fetch queue, in order to
10350                 serialize access to the fetch log.
10351                 """
10352                 self._task_queues.fetch.addFront(fetcher)
10353
10354         def _schedule_setup(self, setup_phase):
10355                 """
10356                 Schedule a setup phase on the merge queue, in order to
10357                 serialize unsandboxed access to the live filesystem.
10358                 """
10359                 self._task_queues.merge.addFront(setup_phase)
10360                 self._schedule()
10361
10362         def _schedule_unpack(self, unpack_phase):
10363                 """
10364                 Schedule an unpack phase on the unpack queue, in order
10365                 to serialize $DISTDIR access for live ebuilds.
10366                 """
10367                 self._task_queues.unpack.add(unpack_phase)
10368
10369         def _find_blockers(self, new_pkg):
10370                 """
10371                 Returns a callable which should be called only when
10372                 the vdb lock has been acquired.
10373                 """
10374                 def get_blockers():
10375                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10376                 return get_blockers
10377
10378         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10379                 if self._opts_ignore_blockers.intersection(self.myopts):
10380                         return None
10381
10382                 # Call gc.collect() here to avoid heap overflow that
10383                 # triggers 'Cannot allocate memory' errors (reported
10384                 # with python-2.5).
10385                 import gc
10386                 gc.collect()
10387
10388                 blocker_db = self._blocker_db[new_pkg.root]
10389
10390                 blocker_dblinks = []
10391                 for blocking_pkg in blocker_db.findInstalledBlockers(
10392                         new_pkg, acquire_lock=acquire_lock):
10393                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10394                                 continue
10395                         if new_pkg.cpv == blocking_pkg.cpv:
10396                                 continue
10397                         blocker_dblinks.append(portage.dblink(
10398                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10399                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10400                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10401
10402                 gc.collect()
10403
10404                 return blocker_dblinks
10405
10406         def _dblink_pkg(self, pkg_dblink):
10407                 cpv = pkg_dblink.mycpv
10408                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10409                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10410                 installed = type_name == "installed"
10411                 return self._pkg(cpv, type_name, root_config, installed=installed)
10412
10413         def _append_to_log_path(self, log_path, msg):
10414                 f = open(log_path, 'a')
10415                 try:
10416                         f.write(msg)
10417                 finally:
10418                         f.close()
10419
10420         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10421
10422                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10423                 log_file = None
10424                 out = sys.stdout
10425                 background = self._background
10426
10427                 if background and log_path is not None:
10428                         log_file = open(log_path, 'a')
10429                         out = log_file
10430
10431                 try:
10432                         for msg in msgs:
10433                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10434                 finally:
10435                         if log_file is not None:
10436                                 log_file.close()
10437
10438         def _dblink_emerge_log(self, msg):
10439                 self._logger.log(msg)
10440
10441         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10442                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10443                 background = self._background
10444
10445                 if log_path is None:
10446                         if not (background and level < logging.WARN):
10447                                 portage.util.writemsg_level(msg,
10448                                         level=level, noiselevel=noiselevel)
10449                 else:
10450                         if not background:
10451                                 portage.util.writemsg_level(msg,
10452                                         level=level, noiselevel=noiselevel)
10453                         self._append_to_log_path(log_path, msg)
10454
10455         def _dblink_ebuild_phase(self,
10456                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10457                 """
10458                 Using this callback for merge phases allows the scheduler
10459                 to run while these phases execute asynchronously, and allows
10460                 the scheduler control output handling.
10461                 """
10462
10463                 scheduler = self._sched_iface
10464                 settings = pkg_dblink.settings
10465                 pkg = self._dblink_pkg(pkg_dblink)
10466                 background = self._background
10467                 log_path = settings.get("PORTAGE_LOG_FILE")
10468
10469                 ebuild_phase = EbuildPhase(background=background,
10470                         pkg=pkg, phase=phase, scheduler=scheduler,
10471                         settings=settings, tree=pkg_dblink.treetype)
10472                 ebuild_phase.start()
10473                 ebuild_phase.wait()
10474
10475                 return ebuild_phase.returncode
10476
10477         def _generate_digests(self):
10478                 """
10479                 Generate digests if necessary for --digests or FEATURES=digest.
10480                 In order to avoid interference, this must done before parallel
10481                 tasks are started.
10482                 """
10483
10484                 if '--fetchonly' in self.myopts:
10485                         return os.EX_OK
10486
10487                 digest = '--digest' in self.myopts
10488                 if not digest:
10489                         for pkgsettings in self.pkgsettings.itervalues():
10490                                 if 'digest' in pkgsettings.features:
10491                                         digest = True
10492                                         break
10493
10494                 if not digest:
10495                         return os.EX_OK
10496
10497                 for x in self._mergelist:
10498                         if not isinstance(x, Package) or \
10499                                 x.type_name != 'ebuild' or \
10500                                 x.operation != 'merge':
10501                                 continue
10502                         pkgsettings = self.pkgsettings[x.root]
10503                         if '--digest' not in self.myopts and \
10504                                 'digest' not in pkgsettings.features:
10505                                 continue
10506                         portdb = x.root_config.trees['porttree'].dbapi
10507                         ebuild_path = portdb.findname(x.cpv)
10508                         if not ebuild_path:
10509                                 writemsg_level(
10510                                         "!!! Could not locate ebuild for '%s'.\n" \
10511                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10512                                 return 1
10513                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10514                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10515                                 writemsg_level(
10516                                         "!!! Unable to generate manifest for '%s'.\n" \
10517                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10518                                 return 1
10519
10520                 return os.EX_OK
10521
10522         def _check_manifests(self):
10523                 # Verify all the manifests now so that the user is notified of failure
10524                 # as soon as possible.
10525                 if "strict" not in self.settings.features or \
10526                         "--fetchonly" in self.myopts or \
10527                         "--fetch-all-uri" in self.myopts:
10528                         return os.EX_OK
10529
10530                 shown_verifying_msg = False
10531                 quiet_settings = {}
10532                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10533                         quiet_config = portage.config(clone=pkgsettings)
10534                         quiet_config["PORTAGE_QUIET"] = "1"
10535                         quiet_config.backup_changes("PORTAGE_QUIET")
10536                         quiet_settings[myroot] = quiet_config
10537                         del quiet_config
10538
10539                 for x in self._mergelist:
10540                         if not isinstance(x, Package) or \
10541                                 x.type_name != "ebuild":
10542                                 continue
10543
10544                         if not shown_verifying_msg:
10545                                 shown_verifying_msg = True
10546                                 self._status_msg("Verifying ebuild manifests")
10547
10548                         root_config = x.root_config
10549                         portdb = root_config.trees["porttree"].dbapi
10550                         quiet_config = quiet_settings[root_config.root]
10551                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10552                         if not portage.digestcheck([], quiet_config, strict=True):
10553                                 return 1
10554
10555                 return os.EX_OK
10556
10557         def _add_prefetchers(self):
10558
10559                 if not self._parallel_fetch:
10560                         return
10561
10562                 if self._parallel_fetch:
10563                         self._status_msg("Starting parallel fetch")
10564
10565                         prefetchers = self._prefetchers
10566                         getbinpkg = "--getbinpkg" in self.myopts
10567
10568                         # In order to avoid "waiting for lock" messages
10569                         # at the beginning, which annoy users, never
10570                         # spawn a prefetcher for the first package.
10571                         for pkg in self._mergelist[1:]:
10572                                 prefetcher = self._create_prefetcher(pkg)
10573                                 if prefetcher is not None:
10574                                         self._task_queues.fetch.add(prefetcher)
10575                                         prefetchers[pkg] = prefetcher
10576
10577         def _create_prefetcher(self, pkg):
10578                 """
10579                 @return: a prefetcher, or None if not applicable
10580                 """
10581                 prefetcher = None
10582
10583                 if not isinstance(pkg, Package):
10584                         pass
10585
10586                 elif pkg.type_name == "ebuild":
10587
10588                         prefetcher = EbuildFetcher(background=True,
10589                                 config_pool=self._ConfigPool(pkg.root,
10590                                 self._allocate_config, self._deallocate_config),
10591                                 fetchonly=1, logfile=self._fetch_log,
10592                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10593
10594                 elif pkg.type_name == "binary" and \
10595                         "--getbinpkg" in self.myopts and \
10596                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10597
10598                         prefetcher = BinpkgPrefetcher(background=True,
10599                                 pkg=pkg, scheduler=self._sched_iface)
10600
10601                 return prefetcher
10602
10603         def _is_restart_scheduled(self):
10604                 """
10605                 Check if the merge list contains a replacement
10606                 for the current running instance, that will result
10607                 in restart after merge.
10608                 @rtype: bool
10609                 @returns: True if a restart is scheduled, False otherwise.
10610                 """
10611                 if self._opts_no_restart.intersection(self.myopts):
10612                         return False
10613
10614                 mergelist = self._mergelist
10615
10616                 for i, pkg in enumerate(mergelist):
10617                         if self._is_restart_necessary(pkg) and \
10618                                 i != len(mergelist) - 1:
10619                                 return True
10620
10621                 return False
10622
10623         def _is_restart_necessary(self, pkg):
10624                 """
10625                 @return: True if merging the given package
10626                         requires restart, False otherwise.
10627                 """
10628
10629                 # Figure out if we need a restart.
10630                 if pkg.root == self._running_root.root and \
10631                         portage.match_from_list(
10632                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10633                         if self._running_portage:
10634                                 return pkg.cpv != self._running_portage.cpv
10635                         return True
10636                 return False
10637
10638         def _restart_if_necessary(self, pkg):
10639                 """
10640                 Use execv() to restart emerge. This happens
10641                 if portage upgrades itself and there are
10642                 remaining packages in the list.
10643                 """
10644
10645                 if self._opts_no_restart.intersection(self.myopts):
10646                         return
10647
10648                 if not self._is_restart_necessary(pkg):
10649                         return
10650
10651                 if pkg == self._mergelist[-1]:
10652                         return
10653
10654                 self._main_loop_cleanup()
10655
10656                 logger = self._logger
10657                 pkg_count = self._pkg_count
10658                 mtimedb = self._mtimedb
10659                 bad_resume_opts = self._bad_resume_opts
10660
10661                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10662                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10663
10664                 logger.log(" *** RESTARTING " + \
10665                         "emerge via exec() after change of " + \
10666                         "portage version.")
10667
10668                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10669                 mtimedb.commit()
10670                 portage.run_exitfuncs()
10671                 mynewargv = [sys.argv[0], "--resume"]
10672                 resume_opts = self.myopts.copy()
10673                 # For automatic resume, we need to prevent
10674                 # any of bad_resume_opts from leaking in
10675                 # via EMERGE_DEFAULT_OPTS.
10676                 resume_opts["--ignore-default-opts"] = True
10677                 for myopt, myarg in resume_opts.iteritems():
10678                         if myopt not in bad_resume_opts:
10679                                 if myarg is True:
10680                                         mynewargv.append(myopt)
10681                                 else:
10682                                         mynewargv.append(myopt +"="+ str(myarg))
10683                 # priority only needs to be adjusted on the first run
10684                 os.environ["PORTAGE_NICENESS"] = "0"
10685                 os.execv(mynewargv[0], mynewargv)
10686
10687         def merge(self):
10688
10689                 if "--resume" in self.myopts:
10690                         # We're resuming.
10691                         portage.writemsg_stdout(
10692                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10693                         self._logger.log(" *** Resuming merge...")
10694
10695                 self._save_resume_list()
10696
10697                 try:
10698                         self._background = self._background_mode()
10699                 except self._unknown_internal_error:
10700                         return 1
10701
10702                 for root in self.trees:
10703                         root_config = self.trees[root]["root_config"]
10704
10705                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10706                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10707                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10708                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10709                         if not tmpdir or not os.path.isdir(tmpdir):
10710                                 msg = "The directory specified in your " + \
10711                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10712                                 "does not exist. Please create this " + \
10713                                 "directory or correct your PORTAGE_TMPDIR setting."
10714                                 msg = textwrap.wrap(msg, 70)
10715                                 out = portage.output.EOutput()
10716                                 for l in msg:
10717                                         out.eerror(l)
10718                                 return 1
10719
10720                         if self._background:
10721                                 root_config.settings.unlock()
10722                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10723                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10724                                 root_config.settings.lock()
10725
10726                         self.pkgsettings[root] = portage.config(
10727                                 clone=root_config.settings)
10728
10729                 rval = self._generate_digests()
10730                 if rval != os.EX_OK:
10731                         return rval
10732
10733                 rval = self._check_manifests()
10734                 if rval != os.EX_OK:
10735                         return rval
10736
10737                 keep_going = "--keep-going" in self.myopts
10738                 fetchonly = self._build_opts.fetchonly
10739                 mtimedb = self._mtimedb
10740                 failed_pkgs = self._failed_pkgs
10741
10742                 while True:
10743                         rval = self._merge()
10744                         if rval == os.EX_OK or fetchonly or not keep_going:
10745                                 break
10746                         if "resume" not in mtimedb:
10747                                 break
10748                         mergelist = self._mtimedb["resume"].get("mergelist")
10749                         if not mergelist:
10750                                 break
10751
10752                         if not failed_pkgs:
10753                                 break
10754
10755                         for failed_pkg in failed_pkgs:
10756                                 mergelist.remove(list(failed_pkg.pkg))
10757
10758                         self._failed_pkgs_all.extend(failed_pkgs)
10759                         del failed_pkgs[:]
10760
10761                         if not mergelist:
10762                                 break
10763
10764                         if not self._calc_resume_list():
10765                                 break
10766
10767                         clear_caches(self.trees)
10768                         if not self._mergelist:
10769                                 break
10770
10771                         self._save_resume_list()
10772                         self._pkg_count.curval = 0
10773                         self._pkg_count.maxval = len([x for x in self._mergelist \
10774                                 if isinstance(x, Package) and x.operation == "merge"])
10775                         self._status_display.maxval = self._pkg_count.maxval
10776
10777                 self._logger.log(" *** Finished. Cleaning up...")
10778
10779                 if failed_pkgs:
10780                         self._failed_pkgs_all.extend(failed_pkgs)
10781                         del failed_pkgs[:]
10782
10783                 background = self._background
10784                 failure_log_shown = False
10785                 if background and len(self._failed_pkgs_all) == 1:
10786                         # If only one package failed then just show it's
10787                         # whole log for easy viewing.
10788                         failed_pkg = self._failed_pkgs_all[-1]
10789                         build_dir = failed_pkg.build_dir
10790                         log_file = None
10791
10792                         log_paths = [failed_pkg.build_log]
10793
10794                         log_path = self._locate_failure_log(failed_pkg)
10795                         if log_path is not None:
10796                                 try:
10797                                         log_file = open(log_path)
10798                                 except IOError:
10799                                         pass
10800
10801                         if log_file is not None:
10802                                 try:
10803                                         for line in log_file:
10804                                                 writemsg_level(line, noiselevel=-1)
10805                                 finally:
10806                                         log_file.close()
10807                                 failure_log_shown = True
10808
10809                 # Dump mod_echo output now since it tends to flood the terminal.
10810                 # This allows us to avoid having more important output, generated
10811                 # later, from being swept away by the mod_echo output.
10812                 mod_echo_output =  _flush_elog_mod_echo()
10813
10814                 if background and not failure_log_shown and \
10815                         self._failed_pkgs_all and \
10816                         self._failed_pkgs_die_msgs and \
10817                         not mod_echo_output:
10818
10819                         printer = portage.output.EOutput()
10820                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10821                                 root_msg = ""
10822                                 if mysettings["ROOT"] != "/":
10823                                         root_msg = " merged to %s" % mysettings["ROOT"]
10824                                 print
10825                                 printer.einfo("Error messages for package %s%s:" % \
10826                                         (colorize("INFORM", key), root_msg))
10827                                 print
10828                                 for phase in portage.const.EBUILD_PHASES:
10829                                         if phase not in logentries:
10830                                                 continue
10831                                         for msgtype, msgcontent in logentries[phase]:
10832                                                 if isinstance(msgcontent, basestring):
10833                                                         msgcontent = [msgcontent]
10834                                                 for line in msgcontent:
10835                                                         printer.eerror(line.strip("\n"))
10836
10837                 if self._post_mod_echo_msgs:
10838                         for msg in self._post_mod_echo_msgs:
10839                                 msg()
10840
10841                 if len(self._failed_pkgs_all) > 1 or \
10842                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10843                         if len(self._failed_pkgs_all) > 1:
10844                                 msg = "The following %d packages have " % \
10845                                         len(self._failed_pkgs_all) + \
10846                                         "failed to build or install:"
10847                         else:
10848                                 msg = "The following package has " + \
10849                                         "failed to build or install:"
10850                         prefix = bad(" * ")
10851                         writemsg(prefix + "\n", noiselevel=-1)
10852                         from textwrap import wrap
10853                         for line in wrap(msg, 72):
10854                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10855                         writemsg(prefix + "\n", noiselevel=-1)
10856                         for failed_pkg in self._failed_pkgs_all:
10857                                 writemsg("%s\t%s\n" % (prefix,
10858                                         colorize("INFORM", str(failed_pkg.pkg))),
10859                                         noiselevel=-1)
10860                         writemsg(prefix + "\n", noiselevel=-1)
10861
10862                 return rval
10863
10864         def _elog_listener(self, mysettings, key, logentries, fulltext):
10865                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10866                 if errors:
10867                         self._failed_pkgs_die_msgs.append(
10868                                 (mysettings, key, errors))
10869
10870         def _locate_failure_log(self, failed_pkg):
10871
10872                 build_dir = failed_pkg.build_dir
10873                 log_file = None
10874
10875                 log_paths = [failed_pkg.build_log]
10876
10877                 for log_path in log_paths:
10878                         if not log_path:
10879                                 continue
10880
10881                         try:
10882                                 log_size = os.stat(log_path).st_size
10883                         except OSError:
10884                                 continue
10885
10886                         if log_size == 0:
10887                                 continue
10888
10889                         return log_path
10890
10891                 return None
10892
10893         def _add_packages(self):
10894                 pkg_queue = self._pkg_queue
10895                 for pkg in self._mergelist:
10896                         if isinstance(pkg, Package):
10897                                 pkg_queue.append(pkg)
10898                         elif isinstance(pkg, Blocker):
10899                                 pass
10900
10901         def _system_merge_started(self, merge):
10902                 """
10903                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10904                 """
10905                 graph = self._digraph
10906                 if graph is None:
10907                         return
10908                 pkg = merge.merge.pkg
10909
10910                 # Skip this if $ROOT != / since it shouldn't matter if there
10911                 # are unsatisfied system runtime deps in this case.
10912                 if pkg.root != '/':
10913                         return
10914
10915                 completed_tasks = self._completed_tasks
10916                 unsatisfied = self._unsatisfied_system_deps
10917
10918                 def ignore_non_runtime_or_satisfied(priority):
10919                         """
10920                         Ignore non-runtime and satisfied runtime priorities.
10921                         """
10922                         if isinstance(priority, DepPriority) and \
10923                                 not priority.satisfied and \
10924                                 (priority.runtime or priority.runtime_post):
10925                                 return False
10926                         return True
10927
10928                 # When checking for unsatisfied runtime deps, only check
10929                 # direct deps since indirect deps are checked when the
10930                 # corresponding parent is merged.
10931                 for child in graph.child_nodes(pkg,
10932                         ignore_priority=ignore_non_runtime_or_satisfied):
10933                         if not isinstance(child, Package) or \
10934                                 child.operation == 'uninstall':
10935                                 continue
10936                         if child is pkg:
10937                                 continue
10938                         if child.operation == 'merge' and \
10939                                 child not in completed_tasks:
10940                                 unsatisfied.add(child)
10941
10942         def _merge_wait_exit_handler(self, task):
10943                 self._merge_wait_scheduled.remove(task)
10944                 self._merge_exit(task)
10945
10946         def _merge_exit(self, merge):
10947                 self._do_merge_exit(merge)
10948                 self._deallocate_config(merge.merge.settings)
10949                 if merge.returncode == os.EX_OK and \
10950                         not merge.merge.pkg.installed:
10951                         self._status_display.curval += 1
10952                 self._status_display.merges = len(self._task_queues.merge)
10953                 self._schedule()
10954
10955         def _do_merge_exit(self, merge):
10956                 pkg = merge.merge.pkg
10957                 if merge.returncode != os.EX_OK:
10958                         settings = merge.merge.settings
10959                         build_dir = settings.get("PORTAGE_BUILDDIR")
10960                         build_log = settings.get("PORTAGE_LOG_FILE")
10961
10962                         self._failed_pkgs.append(self._failed_pkg(
10963                                 build_dir=build_dir, build_log=build_log,
10964                                 pkg=pkg,
10965                                 returncode=merge.returncode))
10966                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10967
10968                         self._status_display.failed = len(self._failed_pkgs)
10969                         return
10970
10971                 self._task_complete(pkg)
10972                 pkg_to_replace = merge.merge.pkg_to_replace
10973                 if pkg_to_replace is not None:
10974                         # When a package is replaced, mark it's uninstall
10975                         # task complete (if any).
10976                         uninst_hash_key = \
10977                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10978                         self._task_complete(uninst_hash_key)
10979
10980                 if pkg.installed:
10981                         return
10982
10983                 self._restart_if_necessary(pkg)
10984
10985                 # Call mtimedb.commit() after each merge so that
10986                 # --resume still works after being interrupted
10987                 # by reboot, sigkill or similar.
10988                 mtimedb = self._mtimedb
10989                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10990                 if not mtimedb["resume"]["mergelist"]:
10991                         del mtimedb["resume"]
10992                 mtimedb.commit()
10993
10994         def _build_exit(self, build):
10995                 if build.returncode == os.EX_OK:
10996                         self.curval += 1
10997                         merge = PackageMerge(merge=build)
10998                         if not build.build_opts.buildpkgonly and \
10999                                 build.pkg in self._deep_system_deps:
11000                                 # Since dependencies on system packages are frequently
11001                                 # unspecified, merge them only when no builds are executing.
11002                                 self._merge_wait_queue.append(merge)
11003                                 merge.addStartListener(self._system_merge_started)
11004                         else:
11005                                 merge.addExitListener(self._merge_exit)
11006                                 self._task_queues.merge.add(merge)
11007                                 self._status_display.merges = len(self._task_queues.merge)
11008                 else:
11009                         settings = build.settings
11010                         build_dir = settings.get("PORTAGE_BUILDDIR")
11011                         build_log = settings.get("PORTAGE_LOG_FILE")
11012
11013                         self._failed_pkgs.append(self._failed_pkg(
11014                                 build_dir=build_dir, build_log=build_log,
11015                                 pkg=build.pkg,
11016                                 returncode=build.returncode))
11017                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11018
11019                         self._status_display.failed = len(self._failed_pkgs)
11020                         self._deallocate_config(build.settings)
11021                 self._jobs -= 1
11022                 self._status_display.running = self._jobs
11023                 self._schedule()
11024
11025         def _extract_exit(self, build):
11026                 self._build_exit(build)
11027
11028         def _task_complete(self, pkg):
11029                 self._completed_tasks.add(pkg)
11030                 self._unsatisfied_system_deps.discard(pkg)
11031                 self._choose_pkg_return_early = False
11032
11033         def _merge(self):
11034
11035                 self._add_prefetchers()
11036                 self._add_packages()
11037                 pkg_queue = self._pkg_queue
11038                 failed_pkgs = self._failed_pkgs
11039                 portage.locks._quiet = self._background
11040                 portage.elog._emerge_elog_listener = self._elog_listener
11041                 rval = os.EX_OK
11042
11043                 try:
11044                         self._main_loop()
11045                 finally:
11046                         self._main_loop_cleanup()
11047                         portage.locks._quiet = False
11048                         portage.elog._emerge_elog_listener = None
11049                         if failed_pkgs:
11050                                 rval = failed_pkgs[-1].returncode
11051
11052                 return rval
11053
11054         def _main_loop_cleanup(self):
11055                 del self._pkg_queue[:]
11056                 self._completed_tasks.clear()
11057                 self._deep_system_deps.clear()
11058                 self._unsatisfied_system_deps.clear()
11059                 self._choose_pkg_return_early = False
11060                 self._status_display.reset()
11061                 self._digraph = None
11062                 self._task_queues.fetch.clear()
11063
11064         def _choose_pkg(self):
11065                 """
11066                 Choose a task that has all it's dependencies satisfied.
11067                 """
11068
11069                 if self._choose_pkg_return_early:
11070                         return None
11071
11072                 if self._digraph is None:
11073                         if (self._jobs or self._task_queues.merge) and \
11074                                 not ("--nodeps" in self.myopts and \
11075                                 (self._max_jobs is True or self._max_jobs > 1)):
11076                                 self._choose_pkg_return_early = True
11077                                 return None
11078                         return self._pkg_queue.pop(0)
11079
11080                 if not (self._jobs or self._task_queues.merge):
11081                         return self._pkg_queue.pop(0)
11082
11083                 self._prune_digraph()
11084
11085                 chosen_pkg = None
11086                 later = set(self._pkg_queue)
11087                 for pkg in self._pkg_queue:
11088                         later.remove(pkg)
11089                         if not self._dependent_on_scheduled_merges(pkg, later):
11090                                 chosen_pkg = pkg
11091                                 break
11092
11093                 if chosen_pkg is not None:
11094                         self._pkg_queue.remove(chosen_pkg)
11095
11096                 if chosen_pkg is None:
11097                         # There's no point in searching for a package to
11098                         # choose until at least one of the existing jobs
11099                         # completes.
11100                         self._choose_pkg_return_early = True
11101
11102                 return chosen_pkg
11103
11104         def _dependent_on_scheduled_merges(self, pkg, later):
11105                 """
11106                 Traverse the subgraph of the given packages deep dependencies
11107                 to see if it contains any scheduled merges.
11108                 @param pkg: a package to check dependencies for
11109                 @type pkg: Package
11110                 @param later: packages for which dependence should be ignored
11111                         since they will be merged later than pkg anyway and therefore
11112                         delaying the merge of pkg will not result in a more optimal
11113                         merge order
11114                 @type later: set
11115                 @rtype: bool
11116                 @returns: True if the package is dependent, False otherwise.
11117                 """
11118
11119                 graph = self._digraph
11120                 completed_tasks = self._completed_tasks
11121
11122                 dependent = False
11123                 traversed_nodes = set([pkg])
11124                 direct_deps = graph.child_nodes(pkg)
11125                 node_stack = direct_deps
11126                 direct_deps = frozenset(direct_deps)
11127                 while node_stack:
11128                         node = node_stack.pop()
11129                         if node in traversed_nodes:
11130                                 continue
11131                         traversed_nodes.add(node)
11132                         if not ((node.installed and node.operation == "nomerge") or \
11133                                 (node.operation == "uninstall" and \
11134                                 node not in direct_deps) or \
11135                                 node in completed_tasks or \
11136                                 node in later):
11137                                 dependent = True
11138                                 break
11139                         node_stack.extend(graph.child_nodes(node))
11140
11141                 return dependent
11142
11143         def _allocate_config(self, root):
11144                 """
11145                 Allocate a unique config instance for a task in order
11146                 to prevent interference between parallel tasks.
11147                 """
11148                 if self._config_pool[root]:
11149                         temp_settings = self._config_pool[root].pop()
11150                 else:
11151                         temp_settings = portage.config(clone=self.pkgsettings[root])
11152                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11153                 # performance reasons, call it here to make sure all settings from the
11154                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11155                 temp_settings.reload()
11156                 temp_settings.reset()
11157                 return temp_settings
11158
11159         def _deallocate_config(self, settings):
11160                 self._config_pool[settings["ROOT"]].append(settings)
11161
11162         def _main_loop(self):
11163
11164                 # Only allow 1 job max if a restart is scheduled
11165                 # due to portage update.
11166                 if self._is_restart_scheduled() or \
11167                         self._opts_no_background.intersection(self.myopts):
11168                         self._set_max_jobs(1)
11169
11170                 merge_queue = self._task_queues.merge
11171
11172                 while self._schedule():
11173                         if self._poll_event_handlers:
11174                                 self._poll_loop()
11175
11176                 while True:
11177                         self._schedule()
11178                         if not (self._jobs or merge_queue):
11179                                 break
11180                         if self._poll_event_handlers:
11181                                 self._poll_loop()
11182
11183         def _keep_scheduling(self):
11184                 return bool(self._pkg_queue and \
11185                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11186
11187         def _schedule_tasks(self):
11188
11189                 # When the number of jobs drops to zero, process all waiting merges.
11190                 if not self._jobs and self._merge_wait_queue:
11191                         for task in self._merge_wait_queue:
11192                                 task.addExitListener(self._merge_wait_exit_handler)
11193                                 self._task_queues.merge.add(task)
11194                         self._status_display.merges = len(self._task_queues.merge)
11195                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11196                         del self._merge_wait_queue[:]
11197
11198                 self._schedule_tasks_imp()
11199                 self._status_display.display()
11200
11201                 state_change = 0
11202                 for q in self._task_queues.values():
11203                         if q.schedule():
11204                                 state_change += 1
11205
11206                 # Cancel prefetchers if they're the only reason
11207                 # the main poll loop is still running.
11208                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11209                         not (self._jobs or self._task_queues.merge) and \
11210                         self._task_queues.fetch:
11211                         self._task_queues.fetch.clear()
11212                         state_change += 1
11213
11214                 if state_change:
11215                         self._schedule_tasks_imp()
11216                         self._status_display.display()
11217
11218                 return self._keep_scheduling()
11219
11220         def _job_delay(self):
11221                 """
11222                 @rtype: bool
11223                 @returns: True if job scheduling should be delayed, False otherwise.
11224                 """
11225
11226                 if self._jobs and self._max_load is not None:
11227
11228                         current_time = time.time()
11229
11230                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11231                         if delay > self._job_delay_max:
11232                                 delay = self._job_delay_max
11233                         if (current_time - self._previous_job_start_time) < delay:
11234                                 return True
11235
11236                 return False
11237
11238         def _schedule_tasks_imp(self):
11239                 """
11240                 @rtype: bool
11241                 @returns: True if state changed, False otherwise.
11242                 """
11243
11244                 state_change = 0
11245
11246                 while True:
11247
11248                         if not self._keep_scheduling():
11249                                 return bool(state_change)
11250
11251                         if self._choose_pkg_return_early or \
11252                                 self._merge_wait_scheduled or \
11253                                 (self._jobs and self._unsatisfied_system_deps) or \
11254                                 not self._can_add_job() or \
11255                                 self._job_delay():
11256                                 return bool(state_change)
11257
11258                         pkg = self._choose_pkg()
11259                         if pkg is None:
11260                                 return bool(state_change)
11261
11262                         state_change += 1
11263
11264                         if not pkg.installed:
11265                                 self._pkg_count.curval += 1
11266
11267                         task = self._task(pkg)
11268
11269                         if pkg.installed:
11270                                 merge = PackageMerge(merge=task)
11271                                 merge.addExitListener(self._merge_exit)
11272                                 self._task_queues.merge.add(merge)
11273
11274                         elif pkg.built:
11275                                 self._jobs += 1
11276                                 self._previous_job_start_time = time.time()
11277                                 self._status_display.running = self._jobs
11278                                 task.addExitListener(self._extract_exit)
11279                                 self._task_queues.jobs.add(task)
11280
11281                         else:
11282                                 self._jobs += 1
11283                                 self._previous_job_start_time = time.time()
11284                                 self._status_display.running = self._jobs
11285                                 task.addExitListener(self._build_exit)
11286                                 self._task_queues.jobs.add(task)
11287
11288                 return bool(state_change)
11289
11290         def _task(self, pkg):
11291
11292                 pkg_to_replace = None
11293                 if pkg.operation != "uninstall":
11294                         vardb = pkg.root_config.trees["vartree"].dbapi
11295                         previous_cpv = vardb.match(pkg.slot_atom)
11296                         if previous_cpv:
11297                                 previous_cpv = previous_cpv.pop()
11298                                 pkg_to_replace = self._pkg(previous_cpv,
11299                                         "installed", pkg.root_config, installed=True)
11300
11301                 task = MergeListItem(args_set=self._args_set,
11302                         background=self._background, binpkg_opts=self._binpkg_opts,
11303                         build_opts=self._build_opts,
11304                         config_pool=self._ConfigPool(pkg.root,
11305                         self._allocate_config, self._deallocate_config),
11306                         emerge_opts=self.myopts,
11307                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11308                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11309                         pkg_to_replace=pkg_to_replace,
11310                         prefetcher=self._prefetchers.get(pkg),
11311                         scheduler=self._sched_iface,
11312                         settings=self._allocate_config(pkg.root),
11313                         statusMessage=self._status_msg,
11314                         world_atom=self._world_atom)
11315
11316                 return task
11317
11318         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11319                 pkg = failed_pkg.pkg
11320                 msg = "%s to %s %s" % \
11321                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11322                 if pkg.root != "/":
11323                         msg += " %s %s" % (preposition, pkg.root)
11324
11325                 log_path = self._locate_failure_log(failed_pkg)
11326                 if log_path is not None:
11327                         msg += ", Log file:"
11328                 self._status_msg(msg)
11329
11330                 if log_path is not None:
11331                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11332
11333         def _status_msg(self, msg):
11334                 """
11335                 Display a brief status message (no newlines) in the status display.
11336                 This is called by tasks to provide feedback to the user. This
11337                 delegates the resposibility of generating \r and \n control characters,
11338                 to guarantee that lines are created or erased when necessary and
11339                 appropriate.
11340
11341                 @type msg: str
11342                 @param msg: a brief status message (no newlines allowed)
11343                 """
11344                 if not self._background:
11345                         writemsg_level("\n")
11346                 self._status_display.displayMessage(msg)
11347
11348         def _save_resume_list(self):
11349                 """
11350                 Do this before verifying the ebuild Manifests since it might
11351                 be possible for the user to use --resume --skipfirst get past
11352                 a non-essential package with a broken digest.
11353                 """
11354                 mtimedb = self._mtimedb
11355                 mtimedb["resume"]["mergelist"] = [list(x) \
11356                         for x in self._mergelist \
11357                         if isinstance(x, Package) and x.operation == "merge"]
11358
11359                 mtimedb.commit()
11360
11361         def _calc_resume_list(self):
11362                 """
11363                 Use the current resume list to calculate a new one,
11364                 dropping any packages with unsatisfied deps.
11365                 @rtype: bool
11366                 @returns: True if successful, False otherwise.
11367                 """
11368                 print colorize("GOOD", "*** Resuming merge...")
11369
11370                 if self._show_list():
11371                         if "--tree" in self.myopts:
11372                                 portage.writemsg_stdout("\n" + \
11373                                         darkgreen("These are the packages that " + \
11374                                         "would be merged, in reverse order:\n\n"))
11375
11376                         else:
11377                                 portage.writemsg_stdout("\n" + \
11378                                         darkgreen("These are the packages that " + \
11379                                         "would be merged, in order:\n\n"))
11380
11381                 show_spinner = "--quiet" not in self.myopts and \
11382                         "--nodeps" not in self.myopts
11383
11384                 if show_spinner:
11385                         print "Calculating dependencies  ",
11386
11387                 myparams = create_depgraph_params(self.myopts, None)
11388                 success = False
11389                 e = None
11390                 try:
11391                         success, mydepgraph, dropped_tasks = resume_depgraph(
11392                                 self.settings, self.trees, self._mtimedb, self.myopts,
11393                                 myparams, self._spinner)
11394                 except depgraph.UnsatisfiedResumeDep, exc:
11395                         # rename variable to avoid python-3.0 error:
11396                         # SyntaxError: can not delete variable 'e' referenced in nested
11397                         #              scope
11398                         e = exc
11399                         mydepgraph = e.depgraph
11400                         dropped_tasks = set()
11401
11402                 if show_spinner:
11403                         print "\b\b... done!"
11404
11405                 if e is not None:
11406                         def unsatisfied_resume_dep_msg():
11407                                 mydepgraph.display_problems()
11408                                 out = portage.output.EOutput()
11409                                 out.eerror("One or more packages are either masked or " + \
11410                                         "have missing dependencies:")
11411                                 out.eerror("")
11412                                 indent = "  "
11413                                 show_parents = set()
11414                                 for dep in e.value:
11415                                         if dep.parent in show_parents:
11416                                                 continue
11417                                         show_parents.add(dep.parent)
11418                                         if dep.atom is None:
11419                                                 out.eerror(indent + "Masked package:")
11420                                                 out.eerror(2 * indent + str(dep.parent))
11421                                                 out.eerror("")
11422                                         else:
11423                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11424                                                 out.eerror(2 * indent + str(dep.parent))
11425                                                 out.eerror("")
11426                                 msg = "The resume list contains packages " + \
11427                                         "that are either masked or have " + \
11428                                         "unsatisfied dependencies. " + \
11429                                         "Please restart/continue " + \
11430                                         "the operation manually, or use --skipfirst " + \
11431                                         "to skip the first package in the list and " + \
11432                                         "any other packages that may be " + \
11433                                         "masked or have missing dependencies."
11434                                 for line in textwrap.wrap(msg, 72):
11435                                         out.eerror(line)
11436                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11437                         return False
11438
11439                 if success and self._show_list():
11440                         mylist = mydepgraph.altlist()
11441                         if mylist:
11442                                 if "--tree" in self.myopts:
11443                                         mylist.reverse()
11444                                 mydepgraph.display(mylist, favorites=self._favorites)
11445
11446                 if not success:
11447                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11448                         return False
11449                 mydepgraph.display_problems()
11450
11451                 mylist = mydepgraph.altlist()
11452                 mydepgraph.break_refs(mylist)
11453                 mydepgraph.break_refs(dropped_tasks)
11454                 self._mergelist = mylist
11455                 self._set_digraph(mydepgraph.schedulerGraph())
11456
11457                 msg_width = 75
11458                 for task in dropped_tasks:
11459                         if not (isinstance(task, Package) and task.operation == "merge"):
11460                                 continue
11461                         pkg = task
11462                         msg = "emerge --keep-going:" + \
11463                                 " %s" % (pkg.cpv,)
11464                         if pkg.root != "/":
11465                                 msg += " for %s" % (pkg.root,)
11466                         msg += " dropped due to unsatisfied dependency."
11467                         for line in textwrap.wrap(msg, msg_width):
11468                                 eerror(line, phase="other", key=pkg.cpv)
11469                         settings = self.pkgsettings[pkg.root]
11470                         # Ensure that log collection from $T is disabled inside
11471                         # elog_process(), since any logs that might exist are
11472                         # not valid here.
11473                         settings.pop("T", None)
11474                         portage.elog.elog_process(pkg.cpv, settings)
11475                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11476
11477                 return True
11478
11479         def _show_list(self):
11480                 myopts = self.myopts
11481                 if "--quiet" not in myopts and \
11482                         ("--ask" in myopts or "--tree" in myopts or \
11483                         "--verbose" in myopts):
11484                         return True
11485                 return False
11486
11487         def _world_atom(self, pkg):
11488                 """
11489                 Add the package to the world file, but only if
11490                 it's supposed to be added. Otherwise, do nothing.
11491                 """
11492
11493                 if set(("--buildpkgonly", "--fetchonly",
11494                         "--fetch-all-uri",
11495                         "--oneshot", "--onlydeps",
11496                         "--pretend")).intersection(self.myopts):
11497                         return
11498
11499                 if pkg.root != self.target_root:
11500                         return
11501
11502                 args_set = self._args_set
11503                 if not args_set.findAtomForPackage(pkg):
11504                         return
11505
11506                 logger = self._logger
11507                 pkg_count = self._pkg_count
11508                 root_config = pkg.root_config
11509                 world_set = root_config.sets["world"]
11510                 world_locked = False
11511                 if hasattr(world_set, "lock"):
11512                         world_set.lock()
11513                         world_locked = True
11514
11515                 try:
11516                         if hasattr(world_set, "load"):
11517                                 world_set.load() # maybe it's changed on disk
11518
11519                         atom = create_world_atom(pkg, args_set, root_config)
11520                         if atom:
11521                                 if hasattr(world_set, "add"):
11522                                         self._status_msg(('Recording %s in "world" ' + \
11523                                                 'favorites file...') % atom)
11524                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11525                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11526                                         world_set.add(atom)
11527                                 else:
11528                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11529                                                 (atom,), level=logging.WARN, noiselevel=-1)
11530                 finally:
11531                         if world_locked:
11532                                 world_set.unlock()
11533
11534         def _pkg(self, cpv, type_name, root_config, installed=False):
11535                 """
11536                 Get a package instance from the cache, or create a new
11537                 one if necessary. Raises KeyError from aux_get if it
11538                 failures for some reason (package does not exist or is
11539                 corrupt).
11540                 """
11541                 operation = "merge"
11542                 if installed:
11543                         operation = "nomerge"
11544
11545                 if self._digraph is not None:
11546                         # Reuse existing instance when available.
11547                         pkg = self._digraph.get(
11548                                 (type_name, root_config.root, cpv, operation))
11549                         if pkg is not None:
11550                                 return pkg
11551
11552                 tree_type = depgraph.pkg_tree_map[type_name]
11553                 db = root_config.trees[tree_type].dbapi
11554                 db_keys = list(self.trees[root_config.root][
11555                         tree_type].dbapi._aux_cache_keys)
11556                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11557                 pkg = Package(cpv=cpv, metadata=metadata,
11558                         root_config=root_config, installed=installed)
11559                 if type_name == "ebuild":
11560                         settings = self.pkgsettings[root_config.root]
11561                         settings.setcpv(pkg)
11562                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11563                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11564
11565                 return pkg
11566
11567 class MetadataRegen(PollScheduler):
11568
11569         def __init__(self, portdb, max_jobs=None, max_load=None):
11570                 PollScheduler.__init__(self)
11571                 self._portdb = portdb
11572
11573                 if max_jobs is None:
11574                         max_jobs = 1
11575
11576                 self._max_jobs = max_jobs
11577                 self._max_load = max_load
11578                 self._sched_iface = self._sched_iface_class(
11579                         register=self._register,
11580                         schedule=self._schedule_wait,
11581                         unregister=self._unregister)
11582
11583                 self._valid_pkgs = set()
11584                 self._process_iter = self._iter_metadata_processes()
11585                 self.returncode = os.EX_OK
11586                 self._error_count = 0
11587
11588         def _iter_metadata_processes(self):
11589                 portdb = self._portdb
11590                 valid_pkgs = self._valid_pkgs
11591                 every_cp = portdb.cp_all()
11592                 every_cp.sort(reverse=True)
11593
11594                 while every_cp:
11595                         cp = every_cp.pop()
11596                         portage.writemsg_stdout("Processing %s\n" % cp)
11597                         cpv_list = portdb.cp_list(cp)
11598                         for cpv in cpv_list:
11599                                 valid_pkgs.add(cpv)
11600                                 ebuild_path, repo_path = portdb.findname2(cpv)
11601                                 metadata_process = portdb._metadata_process(
11602                                         cpv, ebuild_path, repo_path)
11603                                 if metadata_process is None:
11604                                         continue
11605                                 yield metadata_process
11606
11607         def run(self):
11608
11609                 portdb = self._portdb
11610                 from portage.cache.cache_errors import CacheError
11611                 dead_nodes = {}
11612
11613                 for mytree in portdb.porttrees:
11614                         try:
11615                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11616                         except CacheError, e:
11617                                 portage.writemsg("Error listing cache entries for " + \
11618                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11619                                 del e
11620                                 dead_nodes = None
11621                                 break
11622
11623                 while self._schedule():
11624                         self._poll_loop()
11625
11626                 while self._jobs:
11627                         self._poll_loop()
11628
11629                 if dead_nodes:
11630                         for y in self._valid_pkgs:
11631                                 for mytree in portdb.porttrees:
11632                                         if portdb.findname2(y, mytree=mytree)[0]:
11633                                                 dead_nodes[mytree].discard(y)
11634
11635                         for mytree, nodes in dead_nodes.iteritems():
11636                                 auxdb = portdb.auxdb[mytree]
11637                                 for y in nodes:
11638                                         try:
11639                                                 del auxdb[y]
11640                                         except (KeyError, CacheError):
11641                                                 pass
11642
11643         def _schedule_tasks(self):
11644                 """
11645                 @rtype: bool
11646                 @returns: True if there may be remaining tasks to schedule,
11647                         False otherwise.
11648                 """
11649                 while self._can_add_job():
11650                         try:
11651                                 metadata_process = self._process_iter.next()
11652                         except StopIteration:
11653                                 return False
11654
11655                         self._jobs += 1
11656                         metadata_process.scheduler = self._sched_iface
11657                         metadata_process.addExitListener(self._metadata_exit)
11658                         metadata_process.start()
11659                 return True
11660
11661         def _metadata_exit(self, metadata_process):
11662                 self._jobs -= 1
11663                 if metadata_process.returncode != os.EX_OK:
11664                         self.returncode = 1
11665                         self._error_count += 1
11666                         self._valid_pkgs.discard(metadata_process.cpv)
11667                         portage.writemsg("Error processing %s, continuing...\n" % \
11668                                 (metadata_process.cpv,))
11669                 self._schedule()
11670
11671 class UninstallFailure(portage.exception.PortageException):
11672         """
11673         An instance of this class is raised by unmerge() when
11674         an uninstallation fails.
11675         """
11676         status = 1
11677         def __init__(self, *pargs):
11678                 portage.exception.PortageException.__init__(self, pargs)
11679                 if pargs:
11680                         self.status = pargs[0]
11681
11682 def unmerge(root_config, myopts, unmerge_action,
11683         unmerge_files, ldpath_mtimes, autoclean=0,
11684         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11685         scheduler=None, writemsg_level=portage.util.writemsg_level):
11686
11687         quiet = "--quiet" in myopts
11688         settings = root_config.settings
11689         sets = root_config.sets
11690         vartree = root_config.trees["vartree"]
11691         candidate_catpkgs=[]
11692         global_unmerge=0
11693         xterm_titles = "notitles" not in settings.features
11694         out = portage.output.EOutput()
11695         pkg_cache = {}
11696         db_keys = list(vartree.dbapi._aux_cache_keys)
11697
11698         def _pkg(cpv):
11699                 pkg = pkg_cache.get(cpv)
11700                 if pkg is None:
11701                         pkg = Package(cpv=cpv, installed=True,
11702                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11703                                 root_config=root_config,
11704                                 type_name="installed")
11705                         pkg_cache[cpv] = pkg
11706                 return pkg
11707
11708         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11709         try:
11710                 # At least the parent needs to exist for the lock file.
11711                 portage.util.ensure_dirs(vdb_path)
11712         except portage.exception.PortageException:
11713                 pass
11714         vdb_lock = None
11715         try:
11716                 if os.access(vdb_path, os.W_OK):
11717                         vdb_lock = portage.locks.lockdir(vdb_path)
11718                 realsyslist = sets["system"].getAtoms()
11719                 syslist = []
11720                 for x in realsyslist:
11721                         mycp = portage.dep_getkey(x)
11722                         if mycp in settings.getvirtuals():
11723                                 providers = []
11724                                 for provider in settings.getvirtuals()[mycp]:
11725                                         if vartree.dbapi.match(provider):
11726                                                 providers.append(provider)
11727                                 if len(providers) == 1:
11728                                         syslist.extend(providers)
11729                         else:
11730                                 syslist.append(mycp)
11731         
11732                 mysettings = portage.config(clone=settings)
11733         
11734                 if not unmerge_files:
11735                         if unmerge_action == "unmerge":
11736                                 print
11737                                 print bold("emerge unmerge") + " can only be used with specific package names"
11738                                 print
11739                                 return 0
11740                         else:
11741                                 global_unmerge = 1
11742         
11743                 localtree = vartree
11744                 # process all arguments and add all
11745                 # valid db entries to candidate_catpkgs
11746                 if global_unmerge:
11747                         if not unmerge_files:
11748                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11749                 else:
11750                         #we've got command-line arguments
11751                         if not unmerge_files:
11752                                 print "\nNo packages to unmerge have been provided.\n"
11753                                 return 0
11754                         for x in unmerge_files:
11755                                 arg_parts = x.split('/')
11756                                 if x[0] not in [".","/"] and \
11757                                         arg_parts[-1][-7:] != ".ebuild":
11758                                         #possible cat/pkg or dep; treat as such
11759                                         candidate_catpkgs.append(x)
11760                                 elif unmerge_action in ["prune","clean"]:
11761                                         print "\n!!! Prune and clean do not accept individual" + \
11762                                                 " ebuilds as arguments;\n    skipping.\n"
11763                                         continue
11764                                 else:
11765                                         # it appears that the user is specifying an installed
11766                                         # ebuild and we're in "unmerge" mode, so it's ok.
11767                                         if not os.path.exists(x):
11768                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11769                                                 return 0
11770         
11771                                         absx   = os.path.abspath(x)
11772                                         sp_absx = absx.split("/")
11773                                         if sp_absx[-1][-7:] == ".ebuild":
11774                                                 del sp_absx[-1]
11775                                                 absx = "/".join(sp_absx)
11776         
11777                                         sp_absx_len = len(sp_absx)
11778         
11779                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11780                                         vdb_len  = len(vdb_path)
11781         
11782                                         sp_vdb     = vdb_path.split("/")
11783                                         sp_vdb_len = len(sp_vdb)
11784         
11785                                         if not os.path.exists(absx+"/CONTENTS"):
11786                                                 print "!!! Not a valid db dir: "+str(absx)
11787                                                 return 0
11788         
11789                                         if sp_absx_len <= sp_vdb_len:
11790                                                 # The Path is shorter... so it can't be inside the vdb.
11791                                                 print sp_absx
11792                                                 print absx
11793                                                 print "\n!!!",x,"cannot be inside "+ \
11794                                                         vdb_path+"; aborting.\n"
11795                                                 return 0
11796         
11797                                         for idx in range(0,sp_vdb_len):
11798                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11799                                                         print sp_absx
11800                                                         print absx
11801                                                         print "\n!!!", x, "is not inside "+\
11802                                                                 vdb_path+"; aborting.\n"
11803                                                         return 0
11804         
11805                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11806                                         candidate_catpkgs.append(
11807                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11808         
11809                 newline=""
11810                 if (not "--quiet" in myopts):
11811                         newline="\n"
11812                 if settings["ROOT"] != "/":
11813                         writemsg_level(darkgreen(newline+ \
11814                                 ">>> Using system located in ROOT tree %s\n" % \
11815                                 settings["ROOT"]))
11816
11817                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11818                         not ("--quiet" in myopts):
11819                         writemsg_level(darkgreen(newline+\
11820                                 ">>> These are the packages that would be unmerged:\n"))
11821
11822                 # Preservation of order is required for --depclean and --prune so
11823                 # that dependencies are respected. Use all_selected to eliminate
11824                 # duplicate packages since the same package may be selected by
11825                 # multiple atoms.
11826                 pkgmap = []
11827                 all_selected = set()
11828                 for x in candidate_catpkgs:
11829                         # cycle through all our candidate deps and determine
11830                         # what will and will not get unmerged
11831                         try:
11832                                 mymatch = vartree.dbapi.match(x)
11833                         except portage.exception.AmbiguousPackageName, errpkgs:
11834                                 print "\n\n!!! The short ebuild name \"" + \
11835                                         x + "\" is ambiguous.  Please specify"
11836                                 print "!!! one of the following fully-qualified " + \
11837                                         "ebuild names instead:\n"
11838                                 for i in errpkgs[0]:
11839                                         print "    " + green(i)
11840                                 print
11841                                 sys.exit(1)
11842         
11843                         if not mymatch and x[0] not in "<>=~":
11844                                 mymatch = localtree.dep_match(x)
11845                         if not mymatch:
11846                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11847                                         (x, unmerge_action), noiselevel=-1)
11848                                 continue
11849
11850                         pkgmap.append(
11851                                 {"protected": set(), "selected": set(), "omitted": set()})
11852                         mykey = len(pkgmap) - 1
11853                         if unmerge_action=="unmerge":
11854                                         for y in mymatch:
11855                                                 if y not in all_selected:
11856                                                         pkgmap[mykey]["selected"].add(y)
11857                                                         all_selected.add(y)
11858                         elif unmerge_action == "prune":
11859                                 if len(mymatch) == 1:
11860                                         continue
11861                                 best_version = mymatch[0]
11862                                 best_slot = vartree.getslot(best_version)
11863                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11864                                 for mypkg in mymatch[1:]:
11865                                         myslot = vartree.getslot(mypkg)
11866                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11867                                         if (myslot == best_slot and mycounter > best_counter) or \
11868                                                 mypkg == portage.best([mypkg, best_version]):
11869                                                 if myslot == best_slot:
11870                                                         if mycounter < best_counter:
11871                                                                 # On slot collision, keep the one with the
11872                                                                 # highest counter since it is the most
11873                                                                 # recently installed.
11874                                                                 continue
11875                                                 best_version = mypkg
11876                                                 best_slot = myslot
11877                                                 best_counter = mycounter
11878                                 pkgmap[mykey]["protected"].add(best_version)
11879                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11880                                         if mypkg != best_version and mypkg not in all_selected)
11881                                 all_selected.update(pkgmap[mykey]["selected"])
11882                         else:
11883                                 # unmerge_action == "clean"
11884                                 slotmap={}
11885                                 for mypkg in mymatch:
11886                                         if unmerge_action == "clean":
11887                                                 myslot = localtree.getslot(mypkg)
11888                                         else:
11889                                                 # since we're pruning, we don't care about slots
11890                                                 # and put all the pkgs in together
11891                                                 myslot = 0
11892                                         if myslot not in slotmap:
11893                                                 slotmap[myslot] = {}
11894                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11895
11896                                 for mypkg in vartree.dbapi.cp_list(
11897                                         portage.dep_getkey(mymatch[0])):
11898                                         myslot = vartree.getslot(mypkg)
11899                                         if myslot not in slotmap:
11900                                                 slotmap[myslot] = {}
11901                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11902
11903                                 for myslot in slotmap:
11904                                         counterkeys = slotmap[myslot].keys()
11905                                         if not counterkeys:
11906                                                 continue
11907                                         counterkeys.sort()
11908                                         pkgmap[mykey]["protected"].add(
11909                                                 slotmap[myslot][counterkeys[-1]])
11910                                         del counterkeys[-1]
11911
11912                                         for counter in counterkeys[:]:
11913                                                 mypkg = slotmap[myslot][counter]
11914                                                 if mypkg not in mymatch:
11915                                                         counterkeys.remove(counter)
11916                                                         pkgmap[mykey]["protected"].add(
11917                                                                 slotmap[myslot][counter])
11918
11919                                         #be pretty and get them in order of merge:
11920                                         for ckey in counterkeys:
11921                                                 mypkg = slotmap[myslot][ckey]
11922                                                 if mypkg not in all_selected:
11923                                                         pkgmap[mykey]["selected"].add(mypkg)
11924                                                         all_selected.add(mypkg)
11925                                         # ok, now the last-merged package
11926                                         # is protected, and the rest are selected
11927                 numselected = len(all_selected)
11928                 if global_unmerge and not numselected:
11929                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11930                         return 0
11931         
11932                 if not numselected:
11933                         portage.writemsg_stdout(
11934                                 "\n>>> No packages selected for removal by " + \
11935                                 unmerge_action + "\n")
11936                         return 0
11937         finally:
11938                 if vdb_lock:
11939                         vartree.dbapi.flush_cache()
11940                         portage.locks.unlockdir(vdb_lock)
11941         
11942         from portage.sets.base import EditablePackageSet
11943         
11944         # generate a list of package sets that are directly or indirectly listed in "world",
11945         # as there is no persistent list of "installed" sets
11946         installed_sets = ["world"]
11947         stop = False
11948         pos = 0
11949         while not stop:
11950                 stop = True
11951                 pos = len(installed_sets)
11952                 for s in installed_sets[pos - 1:]:
11953                         if s not in sets:
11954                                 continue
11955                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11956                         if candidates:
11957                                 stop = False
11958                                 installed_sets += candidates
11959         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11960         del stop, pos
11961
11962         # we don't want to unmerge packages that are still listed in user-editable package sets
11963         # listed in "world" as they would be remerged on the next update of "world" or the 
11964         # relevant package sets.
11965         unknown_sets = set()
11966         for cp in xrange(len(pkgmap)):
11967                 for cpv in pkgmap[cp]["selected"].copy():
11968                         try:
11969                                 pkg = _pkg(cpv)
11970                         except KeyError:
11971                                 # It could have been uninstalled
11972                                 # by a concurrent process.
11973                                 continue
11974
11975                         if unmerge_action != "clean" and \
11976                                 root_config.root == "/" and \
11977                                 portage.match_from_list(
11978                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11979                                 msg = ("Not unmerging package %s since there is no valid " + \
11980                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11981                                 for line in textwrap.wrap(msg, 75):
11982                                         out.eerror(line)
11983                                 # adjust pkgmap so the display output is correct
11984                                 pkgmap[cp]["selected"].remove(cpv)
11985                                 all_selected.remove(cpv)
11986                                 pkgmap[cp]["protected"].add(cpv)
11987                                 continue
11988
11989                         parents = []
11990                         for s in installed_sets:
11991                                 # skip sets that the user requested to unmerge, and skip world 
11992                                 # unless we're unmerging a package set (as the package would be 
11993                                 # removed from "world" later on)
11994                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11995                                         continue
11996
11997                                 if s not in sets:
11998                                         if s in unknown_sets:
11999                                                 continue
12000                                         unknown_sets.add(s)
12001                                         out = portage.output.EOutput()
12002                                         out.eerror(("Unknown set '@%s' in " + \
12003                                                 "%svar/lib/portage/world_sets") % \
12004                                                 (s, root_config.root))
12005                                         continue
12006
12007                                 # only check instances of EditablePackageSet as other classes are generally used for
12008                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12009                                 # user can't do much about them anyway)
12010                                 if isinstance(sets[s], EditablePackageSet):
12011
12012                                         # This is derived from a snippet of code in the
12013                                         # depgraph._iter_atoms_for_pkg() method.
12014                                         for atom in sets[s].iterAtomsForPackage(pkg):
12015                                                 inst_matches = vartree.dbapi.match(atom)
12016                                                 inst_matches.reverse() # descending order
12017                                                 higher_slot = None
12018                                                 for inst_cpv in inst_matches:
12019                                                         try:
12020                                                                 inst_pkg = _pkg(inst_cpv)
12021                                                         except KeyError:
12022                                                                 # It could have been uninstalled
12023                                                                 # by a concurrent process.
12024                                                                 continue
12025
12026                                                         if inst_pkg.cp != atom.cp:
12027                                                                 continue
12028                                                         if pkg >= inst_pkg:
12029                                                                 # This is descending order, and we're not
12030                                                                 # interested in any versions <= pkg given.
12031                                                                 break
12032                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12033                                                                 higher_slot = inst_pkg
12034                                                                 break
12035                                                 if higher_slot is None:
12036                                                         parents.append(s)
12037                                                         break
12038                         if parents:
12039                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12040                                 #print colorize("WARN", "but still listed in the following package sets:")
12041                                 #print "    %s\n" % ", ".join(parents)
12042                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12043                                 print colorize("WARN", "still referenced by the following package sets:")
12044                                 print "    %s\n" % ", ".join(parents)
12045                                 # adjust pkgmap so the display output is correct
12046                                 pkgmap[cp]["selected"].remove(cpv)
12047                                 all_selected.remove(cpv)
12048                                 pkgmap[cp]["protected"].add(cpv)
12049         
12050         del installed_sets
12051
12052         numselected = len(all_selected)
12053         if not numselected:
12054                 writemsg_level(
12055                         "\n>>> No packages selected for removal by " + \
12056                         unmerge_action + "\n")
12057                 return 0
12058
12059         # Unmerge order only matters in some cases
12060         if not ordered:
12061                 unordered = {}
12062                 for d in pkgmap:
12063                         selected = d["selected"]
12064                         if not selected:
12065                                 continue
12066                         cp = portage.cpv_getkey(iter(selected).next())
12067                         cp_dict = unordered.get(cp)
12068                         if cp_dict is None:
12069                                 cp_dict = {}
12070                                 unordered[cp] = cp_dict
12071                                 for k in d:
12072                                         cp_dict[k] = set()
12073                         for k, v in d.iteritems():
12074                                 cp_dict[k].update(v)
12075                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12076
12077         for x in xrange(len(pkgmap)):
12078                 selected = pkgmap[x]["selected"]
12079                 if not selected:
12080                         continue
12081                 for mytype, mylist in pkgmap[x].iteritems():
12082                         if mytype == "selected":
12083                                 continue
12084                         mylist.difference_update(all_selected)
12085                 cp = portage.cpv_getkey(iter(selected).next())
12086                 for y in localtree.dep_match(cp):
12087                         if y not in pkgmap[x]["omitted"] and \
12088                                 y not in pkgmap[x]["selected"] and \
12089                                 y not in pkgmap[x]["protected"] and \
12090                                 y not in all_selected:
12091                                 pkgmap[x]["omitted"].add(y)
12092                 if global_unmerge and not pkgmap[x]["selected"]:
12093                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12094                         continue
12095                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12096                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12097                                 "'%s' is part of your system profile.\n" % cp),
12098                                 level=logging.WARNING, noiselevel=-1)
12099                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12100                                 "be damaging to your system.\n\n"),
12101                                 level=logging.WARNING, noiselevel=-1)
12102                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12103                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12104                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12105                 if not quiet:
12106                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12107                 else:
12108                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12109                 for mytype in ["selected","protected","omitted"]:
12110                         if not quiet:
12111                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12112                         if pkgmap[x][mytype]:
12113                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12114                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12115                                 for pn, ver, rev in sorted_pkgs:
12116                                         if rev == "r0":
12117                                                 myversion = ver
12118                                         else:
12119                                                 myversion = ver + "-" + rev
12120                                         if mytype == "selected":
12121                                                 writemsg_level(
12122                                                         colorize("UNMERGE_WARN", myversion + " "),
12123                                                         noiselevel=-1)
12124                                         else:
12125                                                 writemsg_level(
12126                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12127                         else:
12128                                 writemsg_level("none ", noiselevel=-1)
12129                         if not quiet:
12130                                 writemsg_level("\n", noiselevel=-1)
12131                 if quiet:
12132                         writemsg_level("\n", noiselevel=-1)
12133
12134         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12135                 " packages are slated for removal.\n")
12136         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12137                         " and " + colorize("GOOD", "'omitted'") + \
12138                         " packages will not be removed.\n\n")
12139
12140         if "--pretend" in myopts:
12141                 #we're done... return
12142                 return 0
12143         if "--ask" in myopts:
12144                 if userquery("Would you like to unmerge these packages?")=="No":
12145                         # enter pretend mode for correct formatting of results
12146                         myopts["--pretend"] = True
12147                         print
12148                         print "Quitting."
12149                         print
12150                         return 0
12151         #the real unmerging begins, after a short delay....
12152         if clean_delay and not autoclean:
12153                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12154
12155         for x in xrange(len(pkgmap)):
12156                 for y in pkgmap[x]["selected"]:
12157                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12158                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12159                         mysplit = y.split("/")
12160                         #unmerge...
12161                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12162                                 mysettings, unmerge_action not in ["clean","prune"],
12163                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12164                                 scheduler=scheduler)
12165
12166                         if retval != os.EX_OK:
12167                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12168                                 if raise_on_error:
12169                                         raise UninstallFailure(retval)
12170                                 sys.exit(retval)
12171                         else:
12172                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12173                                         sets["world"].cleanPackage(vartree.dbapi, y)
12174                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12175         if clean_world and hasattr(sets["world"], "remove"):
12176                 for s in root_config.setconfig.active:
12177                         sets["world"].remove(SETPREFIX+s)
12178         return 1
12179
12180 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12181
12182         if os.path.exists("/usr/bin/install-info"):
12183                 out = portage.output.EOutput()
12184                 regen_infodirs=[]
12185                 for z in infodirs:
12186                         if z=='':
12187                                 continue
12188                         inforoot=normpath(root+z)
12189                         if os.path.isdir(inforoot):
12190                                 infomtime = long(os.stat(inforoot).st_mtime)
12191                                 if inforoot not in prev_mtimes or \
12192                                         prev_mtimes[inforoot] != infomtime:
12193                                                 regen_infodirs.append(inforoot)
12194
12195                 if not regen_infodirs:
12196                         portage.writemsg_stdout("\n")
12197                         out.einfo("GNU info directory index is up-to-date.")
12198                 else:
12199                         portage.writemsg_stdout("\n")
12200                         out.einfo("Regenerating GNU info directory index...")
12201
12202                         dir_extensions = ("", ".gz", ".bz2")
12203                         icount=0
12204                         badcount=0
12205                         errmsg = ""
12206                         for inforoot in regen_infodirs:
12207                                 if inforoot=='':
12208                                         continue
12209
12210                                 if not os.path.isdir(inforoot) or \
12211                                         not os.access(inforoot, os.W_OK):
12212                                         continue
12213
12214                                 file_list = os.listdir(inforoot)
12215                                 file_list.sort()
12216                                 dir_file = os.path.join(inforoot, "dir")
12217                                 moved_old_dir = False
12218                                 processed_count = 0
12219                                 for x in file_list:
12220                                         if x.startswith(".") or \
12221                                                 os.path.isdir(os.path.join(inforoot, x)):
12222                                                 continue
12223                                         if x.startswith("dir"):
12224                                                 skip = False
12225                                                 for ext in dir_extensions:
12226                                                         if x == "dir" + ext or \
12227                                                                 x == "dir" + ext + ".old":
12228                                                                 skip = True
12229                                                                 break
12230                                                 if skip:
12231                                                         continue
12232                                         if processed_count == 0:
12233                                                 for ext in dir_extensions:
12234                                                         try:
12235                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12236                                                                 moved_old_dir = True
12237                                                         except EnvironmentError, e:
12238                                                                 if e.errno != errno.ENOENT:
12239                                                                         raise
12240                                                                 del e
12241                                         processed_count += 1
12242                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12243                                         existsstr="already exists, for file `"
12244                                         if myso!="":
12245                                                 if re.search(existsstr,myso):
12246                                                         # Already exists... Don't increment the count for this.
12247                                                         pass
12248                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12249                                                         # This info file doesn't contain a DIR-header: install-info produces this
12250                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12251                                                         # Don't increment the count for this.
12252                                                         pass
12253                                                 else:
12254                                                         badcount=badcount+1
12255                                                         errmsg += myso + "\n"
12256                                         icount=icount+1
12257
12258                                 if moved_old_dir and not os.path.exists(dir_file):
12259                                         # We didn't generate a new dir file, so put the old file
12260                                         # back where it was originally found.
12261                                         for ext in dir_extensions:
12262                                                 try:
12263                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12264                                                 except EnvironmentError, e:
12265                                                         if e.errno != errno.ENOENT:
12266                                                                 raise
12267                                                         del e
12268
12269                                 # Clean dir.old cruft so that they don't prevent
12270                                 # unmerge of otherwise empty directories.
12271                                 for ext in dir_extensions:
12272                                         try:
12273                                                 os.unlink(dir_file + ext + ".old")
12274                                         except EnvironmentError, e:
12275                                                 if e.errno != errno.ENOENT:
12276                                                         raise
12277                                                 del e
12278
12279                                 #update mtime so we can potentially avoid regenerating.
12280                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12281
12282                         if badcount:
12283                                 out.eerror("Processed %d info files; %d errors." % \
12284                                         (icount, badcount))
12285                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12286                         else:
12287                                 if icount > 0:
12288                                         out.einfo("Processed %d info files." % (icount,))
12289
12290
12291 def display_news_notification(root_config, myopts):
12292         target_root = root_config.root
12293         trees = root_config.trees
12294         settings = trees["vartree"].settings
12295         portdb = trees["porttree"].dbapi
12296         vardb = trees["vartree"].dbapi
12297         NEWS_PATH = os.path.join("metadata", "news")
12298         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12299         newsReaderDisplay = False
12300         update = "--pretend" not in myopts
12301
12302         for repo in portdb.getRepositories():
12303                 unreadItems = checkUpdatedNewsItems(
12304                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12305                 if unreadItems:
12306                         if not newsReaderDisplay:
12307                                 newsReaderDisplay = True
12308                                 print
12309                         print colorize("WARN", " * IMPORTANT:"),
12310                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12311                         
12312         
12313         if newsReaderDisplay:
12314                 print colorize("WARN", " *"),
12315                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12316                 print
12317
12318 def display_preserved_libs(vardbapi):
12319         MAX_DISPLAY = 3
12320
12321         # Ensure the registry is consistent with existing files.
12322         vardbapi.plib_registry.pruneNonExisting()
12323
12324         if vardbapi.plib_registry.hasEntries():
12325                 print
12326                 print colorize("WARN", "!!!") + " existing preserved libs:"
12327                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12328                 linkmap = vardbapi.linkmap
12329                 consumer_map = {}
12330                 owners = {}
12331                 linkmap_broken = False
12332
12333                 try:
12334                         linkmap.rebuild()
12335                 except portage.exception.CommandNotFound, e:
12336                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12337                                 level=logging.ERROR, noiselevel=-1)
12338                         del e
12339                         linkmap_broken = True
12340                 else:
12341                         search_for_owners = set()
12342                         for cpv in plibdata:
12343                                 internal_plib_keys = set(linkmap._obj_key(f) \
12344                                         for f in plibdata[cpv])
12345                                 for f in plibdata[cpv]:
12346                                         if f in consumer_map:
12347                                                 continue
12348                                         consumers = []
12349                                         for c in linkmap.findConsumers(f):
12350                                                 # Filter out any consumers that are also preserved libs
12351                                                 # belonging to the same package as the provider.
12352                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12353                                                         consumers.append(c)
12354                                         consumers.sort()
12355                                         consumer_map[f] = consumers
12356                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12357
12358                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12359
12360                 for cpv in plibdata:
12361                         print colorize("WARN", ">>>") + " package: %s" % cpv
12362                         samefile_map = {}
12363                         for f in plibdata[cpv]:
12364                                 obj_key = linkmap._obj_key(f)
12365                                 alt_paths = samefile_map.get(obj_key)
12366                                 if alt_paths is None:
12367                                         alt_paths = set()
12368                                         samefile_map[obj_key] = alt_paths
12369                                 alt_paths.add(f)
12370
12371                         for alt_paths in samefile_map.itervalues():
12372                                 alt_paths = sorted(alt_paths)
12373                                 for p in alt_paths:
12374                                         print colorize("WARN", " * ") + " - %s" % (p,)
12375                                 f = alt_paths[0]
12376                                 consumers = consumer_map.get(f, [])
12377                                 for c in consumers[:MAX_DISPLAY]:
12378                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12379                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12380                                 if len(consumers) == MAX_DISPLAY + 1:
12381                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12382                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12383                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12384                                 elif len(consumers) > MAX_DISPLAY:
12385                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12386                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12387
12388
12389 def _flush_elog_mod_echo():
12390         """
12391         Dump the mod_echo output now so that our other
12392         notifications are shown last.
12393         @rtype: bool
12394         @returns: True if messages were shown, False otherwise.
12395         """
12396         messages_shown = False
12397         try:
12398                 from portage.elog import mod_echo
12399         except ImportError:
12400                 pass # happens during downgrade to a version without the module
12401         else:
12402                 messages_shown = bool(mod_echo._items)
12403                 mod_echo.finalize()
12404         return messages_shown
12405
12406 def post_emerge(root_config, myopts, mtimedb, retval):
12407         """
12408         Misc. things to run at the end of a merge session.
12409         
12410         Update Info Files
12411         Update Config Files
12412         Update News Items
12413         Commit mtimeDB
12414         Display preserved libs warnings
12415         Exit Emerge
12416
12417         @param trees: A dictionary mapping each ROOT to it's package databases
12418         @type trees: dict
12419         @param mtimedb: The mtimeDB to store data needed across merge invocations
12420         @type mtimedb: MtimeDB class instance
12421         @param retval: Emerge's return value
12422         @type retval: Int
12423         @rype: None
12424         @returns:
12425         1.  Calls sys.exit(retval)
12426         """
12427
12428         target_root = root_config.root
12429         trees = { target_root : root_config.trees }
12430         vardbapi = trees[target_root]["vartree"].dbapi
12431         settings = vardbapi.settings
12432         info_mtimes = mtimedb["info"]
12433
12434         # Load the most current variables from ${ROOT}/etc/profile.env
12435         settings.unlock()
12436         settings.reload()
12437         settings.regenerate()
12438         settings.lock()
12439
12440         config_protect = settings.get("CONFIG_PROTECT","").split()
12441         infodirs = settings.get("INFOPATH","").split(":") + \
12442                 settings.get("INFODIR","").split(":")
12443
12444         os.chdir("/")
12445
12446         if retval == os.EX_OK:
12447                 exit_msg = " *** exiting successfully."
12448         else:
12449                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12450         emergelog("notitles" not in settings.features, exit_msg)
12451
12452         _flush_elog_mod_echo()
12453
12454         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12455         if "--pretend" in myopts or (counter_hash is not None and \
12456                 counter_hash == vardbapi._counter_hash()):
12457                 display_news_notification(root_config, myopts)
12458                 # If vdb state has not changed then there's nothing else to do.
12459                 sys.exit(retval)
12460
12461         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12462         portage.util.ensure_dirs(vdb_path)
12463         vdb_lock = None
12464         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12465                 vdb_lock = portage.locks.lockdir(vdb_path)
12466
12467         if vdb_lock:
12468                 try:
12469                         if "noinfo" not in settings.features:
12470                                 chk_updated_info_files(target_root,
12471                                         infodirs, info_mtimes, retval)
12472                         mtimedb.commit()
12473                 finally:
12474                         if vdb_lock:
12475                                 portage.locks.unlockdir(vdb_lock)
12476
12477         chk_updated_cfg_files(target_root, config_protect)
12478         
12479         display_news_notification(root_config, myopts)
12480         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12481                 display_preserved_libs(vardbapi)        
12482
12483         sys.exit(retval)
12484
12485
12486 def chk_updated_cfg_files(target_root, config_protect):
12487         if config_protect:
12488                 #number of directories with some protect files in them
12489                 procount=0
12490                 for x in config_protect:
12491                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12492                         if not os.access(x, os.W_OK):
12493                                 # Avoid Permission denied errors generated
12494                                 # later by `find`.
12495                                 continue
12496                         try:
12497                                 mymode = os.lstat(x).st_mode
12498                         except OSError:
12499                                 continue
12500                         if stat.S_ISLNK(mymode):
12501                                 # We want to treat it like a directory if it
12502                                 # is a symlink to an existing directory.
12503                                 try:
12504                                         real_mode = os.stat(x).st_mode
12505                                         if stat.S_ISDIR(real_mode):
12506                                                 mymode = real_mode
12507                                 except OSError:
12508                                         pass
12509                         if stat.S_ISDIR(mymode):
12510                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12511                         else:
12512                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12513                                         os.path.split(x.rstrip(os.path.sep))
12514                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12515                         a = commands.getstatusoutput(mycommand)
12516                         if a[0] != 0:
12517                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12518                                 sys.stderr.flush()
12519                                 # Show the error message alone, sending stdout to /dev/null.
12520                                 os.system(mycommand + " 1>/dev/null")
12521                         else:
12522                                 files = a[1].split('\0')
12523                                 # split always produces an empty string as the last element
12524                                 if files and not files[-1]:
12525                                         del files[-1]
12526                                 if files:
12527                                         procount += 1
12528                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12529                                         if stat.S_ISDIR(mymode):
12530                                                  print "%d config files in '%s' need updating." % \
12531                                                         (len(files), x)
12532                                         else:
12533                                                  print "config file '%s' needs updating." % x
12534
12535                 if procount:
12536                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12537                                 " section of the " + bold("emerge")
12538                         print " "+yellow("*")+" man page to learn how to update config files."
12539
12540 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12541         update=False):
12542         """
12543         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12544         Returns the number of unread (yet relevent) items.
12545         
12546         @param portdb: a portage tree database
12547         @type portdb: pordbapi
12548         @param vardb: an installed package database
12549         @type vardb: vardbapi
12550         @param NEWS_PATH:
12551         @type NEWS_PATH:
12552         @param UNREAD_PATH:
12553         @type UNREAD_PATH:
12554         @param repo_id:
12555         @type repo_id:
12556         @rtype: Integer
12557         @returns:
12558         1.  The number of unread but relevant news items.
12559         
12560         """
12561         from portage.news import NewsManager
12562         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12563         return manager.getUnreadItems( repo_id, update=update )
12564
12565 def insert_category_into_atom(atom, category):
12566         alphanum = re.search(r'\w', atom)
12567         if alphanum:
12568                 ret = atom[:alphanum.start()] + "%s/" % category + \
12569                         atom[alphanum.start():]
12570         else:
12571                 ret = None
12572         return ret
12573
12574 def is_valid_package_atom(x):
12575         if "/" not in x:
12576                 alphanum = re.search(r'\w', x)
12577                 if alphanum:
12578                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12579         return portage.isvalidatom(x)
12580
12581 def show_blocker_docs_link():
12582         print
12583         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12584         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12585         print
12586         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12587         print
12588
12589 def show_mask_docs():
12590         print "For more information, see the MASKED PACKAGES section in the emerge"
12591         print "man page or refer to the Gentoo Handbook."
12592
12593 def action_sync(settings, trees, mtimedb, myopts, myaction):
12594         xterm_titles = "notitles" not in settings.features
12595         emergelog(xterm_titles, " === sync")
12596         myportdir = settings.get("PORTDIR", None)
12597         out = portage.output.EOutput()
12598         if not myportdir:
12599                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12600                 sys.exit(1)
12601         if myportdir[-1]=="/":
12602                 myportdir=myportdir[:-1]
12603         try:
12604                 st = os.stat(myportdir)
12605         except OSError:
12606                 st = None
12607         if st is None:
12608                 print ">>>",myportdir,"not found, creating it."
12609                 os.makedirs(myportdir,0755)
12610                 st = os.stat(myportdir)
12611
12612         spawn_kwargs = {}
12613         spawn_kwargs["env"] = settings.environ()
12614         if 'usersync' in settings.features and \
12615                 portage.data.secpass >= 2 and \
12616                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12617                 st.st_gid != os.getgid() and st.st_mode & 0070):
12618                 try:
12619                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12620                 except KeyError:
12621                         pass
12622                 else:
12623                         # Drop privileges when syncing, in order to match
12624                         # existing uid/gid settings.
12625                         spawn_kwargs["uid"]    = st.st_uid
12626                         spawn_kwargs["gid"]    = st.st_gid
12627                         spawn_kwargs["groups"] = [st.st_gid]
12628                         spawn_kwargs["env"]["HOME"] = homedir
12629                         umask = 0002
12630                         if not st.st_mode & 0020:
12631                                 umask = umask | 0020
12632                         spawn_kwargs["umask"] = umask
12633
12634         syncuri = settings.get("SYNC", "").strip()
12635         if not syncuri:
12636                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12637                         noiselevel=-1, level=logging.ERROR)
12638                 return 1
12639
12640         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12641         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12642
12643         os.umask(0022)
12644         dosyncuri = syncuri
12645         updatecache_flg = False
12646         if myaction == "metadata":
12647                 print "skipping sync"
12648                 updatecache_flg = True
12649         elif ".git" in vcs_dirs:
12650                 # Update existing git repository, and ignore the syncuri. We are
12651                 # going to trust the user and assume that the user is in the branch
12652                 # that he/she wants updated. We'll let the user manage branches with
12653                 # git directly.
12654                 if portage.process.find_binary("git") is None:
12655                         msg = ["Command not found: git",
12656                         "Type \"emerge dev-util/git\" to enable git support."]
12657                         for l in msg:
12658                                 writemsg_level("!!! %s\n" % l,
12659                                         level=logging.ERROR, noiselevel=-1)
12660                         return 1
12661                 msg = ">>> Starting git pull in %s..." % myportdir
12662                 emergelog(xterm_titles, msg )
12663                 writemsg_level(msg + "\n")
12664                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12665                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12666                 if exitcode != os.EX_OK:
12667                         msg = "!!! git pull error in %s." % myportdir
12668                         emergelog(xterm_titles, msg)
12669                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12670                         return exitcode
12671                 msg = ">>> Git pull in %s successful" % myportdir
12672                 emergelog(xterm_titles, msg)
12673                 writemsg_level(msg + "\n")
12674                 exitcode = git_sync_timestamps(settings, myportdir)
12675                 if exitcode == os.EX_OK:
12676                         updatecache_flg = True
12677         elif syncuri[:8]=="rsync://":
12678                 for vcs_dir in vcs_dirs:
12679                         writemsg_level(("!!! %s appears to be under revision " + \
12680                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12681                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12682                         return 1
12683                 if not os.path.exists("/usr/bin/rsync"):
12684                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12685                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12686                         sys.exit(1)
12687                 mytimeout=180
12688
12689                 rsync_opts = []
12690                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12691                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12692                         rsync_opts.extend([
12693                                 "--recursive",    # Recurse directories
12694                                 "--links",        # Consider symlinks
12695                                 "--safe-links",   # Ignore links outside of tree
12696                                 "--perms",        # Preserve permissions
12697                                 "--times",        # Preserive mod times
12698                                 "--compress",     # Compress the data transmitted
12699                                 "--force",        # Force deletion on non-empty dirs
12700                                 "--whole-file",   # Don't do block transfers, only entire files
12701                                 "--delete",       # Delete files that aren't in the master tree
12702                                 "--stats",        # Show final statistics about what was transfered
12703                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12704                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12705                                 "--exclude=/local",       # Exclude local     from consideration
12706                                 "--exclude=/packages",    # Exclude packages  from consideration
12707                         ])
12708
12709                 else:
12710                         # The below validation is not needed when using the above hardcoded
12711                         # defaults.
12712
12713                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12714                         rsync_opts.extend(
12715                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12716                         for opt in ("--recursive", "--times"):
12717                                 if opt not in rsync_opts:
12718                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12719                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12720                                         rsync_opts.append(opt)
12721         
12722                         for exclude in ("distfiles", "local", "packages"):
12723                                 opt = "--exclude=/%s" % exclude
12724                                 if opt not in rsync_opts:
12725                                         portage.writemsg(yellow("WARNING:") + \
12726                                         " adding required option %s not included in "  % opt + \
12727                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12728                                         rsync_opts.append(opt)
12729         
12730                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12731                                 def rsync_opt_startswith(opt_prefix):
12732                                         for x in rsync_opts:
12733                                                 if x.startswith(opt_prefix):
12734                                                         return True
12735                                         return False
12736
12737                                 if not rsync_opt_startswith("--timeout="):
12738                                         rsync_opts.append("--timeout=%d" % mytimeout)
12739
12740                                 for opt in ("--compress", "--whole-file"):
12741                                         if opt not in rsync_opts:
12742                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12743                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12744                                                 rsync_opts.append(opt)
12745
12746                 if "--quiet" in myopts:
12747                         rsync_opts.append("--quiet")    # Shut up a lot
12748                 else:
12749                         rsync_opts.append("--verbose")  # Print filelist
12750
12751                 if "--verbose" in myopts:
12752                         rsync_opts.append("--progress")  # Progress meter for each file
12753
12754                 if "--debug" in myopts:
12755                         rsync_opts.append("--checksum") # Force checksum on all files
12756
12757                 # Real local timestamp file.
12758                 servertimestampfile = os.path.join(
12759                         myportdir, "metadata", "timestamp.chk")
12760
12761                 content = portage.util.grabfile(servertimestampfile)
12762                 mytimestamp = 0
12763                 if content:
12764                         try:
12765                                 mytimestamp = time.mktime(time.strptime(content[0],
12766                                         "%a, %d %b %Y %H:%M:%S +0000"))
12767                         except (OverflowError, ValueError):
12768                                 pass
12769                 del content
12770
12771                 try:
12772                         rsync_initial_timeout = \
12773                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12774                 except ValueError:
12775                         rsync_initial_timeout = 15
12776
12777                 try:
12778                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12779                 except SystemExit, e:
12780                         raise # Needed else can't exit
12781                 except:
12782                         maxretries=3 #default number of retries
12783
12784                 retries=0
12785                 user_name, hostname, port = re.split(
12786                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12787                 if port is None:
12788                         port=""
12789                 if user_name is None:
12790                         user_name=""
12791                 updatecache_flg=True
12792                 all_rsync_opts = set(rsync_opts)
12793                 extra_rsync_opts = shlex.split(
12794                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12795                 all_rsync_opts.update(extra_rsync_opts)
12796                 family = socket.AF_INET
12797                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12798                         family = socket.AF_INET
12799                 elif socket.has_ipv6 and \
12800                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12801                         family = socket.AF_INET6
12802                 ips=[]
12803                 SERVER_OUT_OF_DATE = -1
12804                 EXCEEDED_MAX_RETRIES = -2
12805                 while (1):
12806                         if ips:
12807                                 del ips[0]
12808                         if ips==[]:
12809                                 try:
12810                                         for addrinfo in socket.getaddrinfo(
12811                                                 hostname, None, family, socket.SOCK_STREAM):
12812                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12813                                                         # IPv6 addresses need to be enclosed in square brackets
12814                                                         ips.append("[%s]" % addrinfo[4][0])
12815                                                 else:
12816                                                         ips.append(addrinfo[4][0])
12817                                         from random import shuffle
12818                                         shuffle(ips)
12819                                 except SystemExit, e:
12820                                         raise # Needed else can't exit
12821                                 except Exception, e:
12822                                         print "Notice:",str(e)
12823                                         dosyncuri=syncuri
12824
12825                         if ips:
12826                                 try:
12827                                         dosyncuri = syncuri.replace(
12828                                                 "//" + user_name + hostname + port + "/",
12829                                                 "//" + user_name + ips[0] + port + "/", 1)
12830                                 except SystemExit, e:
12831                                         raise # Needed else can't exit
12832                                 except Exception, e:
12833                                         print "Notice:",str(e)
12834                                         dosyncuri=syncuri
12835
12836                         if (retries==0):
12837                                 if "--ask" in myopts:
12838                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12839                                                 print
12840                                                 print "Quitting."
12841                                                 print
12842                                                 sys.exit(0)
12843                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12844                                 if "--quiet" not in myopts:
12845                                         print ">>> Starting rsync with "+dosyncuri+"..."
12846                         else:
12847                                 emergelog(xterm_titles,
12848                                         ">>> Starting retry %d of %d with %s" % \
12849                                                 (retries,maxretries,dosyncuri))
12850                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12851
12852                         if mytimestamp != 0 and "--quiet" not in myopts:
12853                                 print ">>> Checking server timestamp ..."
12854
12855                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12856
12857                         if "--debug" in myopts:
12858                                 print rsynccommand
12859
12860                         exitcode = os.EX_OK
12861                         servertimestamp = 0
12862                         # Even if there's no timestamp available locally, fetch the
12863                         # timestamp anyway as an initial probe to verify that the server is
12864                         # responsive.  This protects us from hanging indefinitely on a
12865                         # connection attempt to an unresponsive server which rsync's
12866                         # --timeout option does not prevent.
12867                         if True:
12868                                 # Temporary file for remote server timestamp comparison.
12869                                 from tempfile import mkstemp
12870                                 fd, tmpservertimestampfile = mkstemp()
12871                                 os.close(fd)
12872                                 mycommand = rsynccommand[:]
12873                                 mycommand.append(dosyncuri.rstrip("/") + \
12874                                         "/metadata/timestamp.chk")
12875                                 mycommand.append(tmpservertimestampfile)
12876                                 content = None
12877                                 mypids = []
12878                                 try:
12879                                         def timeout_handler(signum, frame):
12880                                                 raise portage.exception.PortageException("timed out")
12881                                         signal.signal(signal.SIGALRM, timeout_handler)
12882                                         # Timeout here in case the server is unresponsive.  The
12883                                         # --timeout rsync option doesn't apply to the initial
12884                                         # connection attempt.
12885                                         if rsync_initial_timeout:
12886                                                 signal.alarm(rsync_initial_timeout)
12887                                         try:
12888                                                 mypids.extend(portage.process.spawn(
12889                                                         mycommand, env=settings.environ(), returnpid=True))
12890                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12891                                                 content = portage.grabfile(tmpservertimestampfile)
12892                                         finally:
12893                                                 if rsync_initial_timeout:
12894                                                         signal.alarm(0)
12895                                                 try:
12896                                                         os.unlink(tmpservertimestampfile)
12897                                                 except OSError:
12898                                                         pass
12899                                 except portage.exception.PortageException, e:
12900                                         # timed out
12901                                         print e
12902                                         del e
12903                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12904                                                 os.kill(mypids[0], signal.SIGTERM)
12905                                                 os.waitpid(mypids[0], 0)
12906                                         # This is the same code rsync uses for timeout.
12907                                         exitcode = 30
12908                                 else:
12909                                         if exitcode != os.EX_OK:
12910                                                 if exitcode & 0xff:
12911                                                         exitcode = (exitcode & 0xff) << 8
12912                                                 else:
12913                                                         exitcode = exitcode >> 8
12914                                 if mypids:
12915                                         portage.process.spawned_pids.remove(mypids[0])
12916                                 if content:
12917                                         try:
12918                                                 servertimestamp = time.mktime(time.strptime(
12919                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12920                                         except (OverflowError, ValueError):
12921                                                 pass
12922                                 del mycommand, mypids, content
12923                         if exitcode == os.EX_OK:
12924                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12925                                         emergelog(xterm_titles,
12926                                                 ">>> Cancelling sync -- Already current.")
12927                                         print
12928                                         print ">>>"
12929                                         print ">>> Timestamps on the server and in the local repository are the same."
12930                                         print ">>> Cancelling all further sync action. You are already up to date."
12931                                         print ">>>"
12932                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12933                                         print ">>>"
12934                                         print
12935                                         sys.exit(0)
12936                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12937                                         emergelog(xterm_titles,
12938                                                 ">>> Server out of date: %s" % dosyncuri)
12939                                         print
12940                                         print ">>>"
12941                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12942                                         print ">>>"
12943                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12944                                         print ">>>"
12945                                         print
12946                                         exitcode = SERVER_OUT_OF_DATE
12947                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12948                                         # actual sync
12949                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12950                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12951                                         if exitcode in [0,1,3,4,11,14,20,21]:
12952                                                 break
12953                         elif exitcode in [1,3,4,11,14,20,21]:
12954                                 break
12955                         else:
12956                                 # Code 2 indicates protocol incompatibility, which is expected
12957                                 # for servers with protocol < 29 that don't support
12958                                 # --prune-empty-directories.  Retry for a server that supports
12959                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12960                                 pass
12961
12962                         retries=retries+1
12963
12964                         if retries<=maxretries:
12965                                 print ">>> Retrying..."
12966                                 time.sleep(11)
12967                         else:
12968                                 # over retries
12969                                 # exit loop
12970                                 updatecache_flg=False
12971                                 exitcode = EXCEEDED_MAX_RETRIES
12972                                 break
12973
12974                 if (exitcode==0):
12975                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12976                 elif exitcode == SERVER_OUT_OF_DATE:
12977                         sys.exit(1)
12978                 elif exitcode == EXCEEDED_MAX_RETRIES:
12979                         sys.stderr.write(
12980                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12981                         sys.exit(1)
12982                 elif (exitcode>0):
12983                         msg = []
12984                         if exitcode==1:
12985                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12986                                 msg.append("that your SYNC statement is proper.")
12987                                 msg.append("SYNC=" + settings["SYNC"])
12988                         elif exitcode==11:
12989                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12990                                 msg.append("this means your disk is full, but can be caused by corruption")
12991                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12992                                 msg.append("and try again after the problem has been fixed.")
12993                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12994                         elif exitcode==20:
12995                                 msg.append("Rsync was killed before it finished.")
12996                         else:
12997                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12998                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12999                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13000                                 msg.append("temporary problem unless complications exist with your network")
13001                                 msg.append("(and possibly your system's filesystem) configuration.")
13002                         for line in msg:
13003                                 out.eerror(line)
13004                         sys.exit(exitcode)
13005         elif syncuri[:6]=="cvs://":
13006                 if not os.path.exists("/usr/bin/cvs"):
13007                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13008                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13009                         sys.exit(1)
13010                 cvsroot=syncuri[6:]
13011                 cvsdir=os.path.dirname(myportdir)
13012                 if not os.path.exists(myportdir+"/CVS"):
13013                         #initial checkout
13014                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13015                         if os.path.exists(cvsdir+"/gentoo-x86"):
13016                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13017                                 sys.exit(1)
13018                         try:
13019                                 os.rmdir(myportdir)
13020                         except OSError, e:
13021                                 if e.errno != errno.ENOENT:
13022                                         sys.stderr.write(
13023                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13024                                         sys.exit(1)
13025                                 del e
13026                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13027                                 print "!!! cvs checkout error; exiting."
13028                                 sys.exit(1)
13029                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13030                 else:
13031                         #cvs update
13032                         print ">>> Starting cvs update with "+syncuri+"..."
13033                         retval = portage.process.spawn_bash(
13034                                 "cd %s; cvs -z0 -q update -dP" % \
13035                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13036                         if retval != os.EX_OK:
13037                                 sys.exit(retval)
13038                 dosyncuri = syncuri
13039         else:
13040                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13041                         noiselevel=-1, level=logging.ERROR)
13042                 return 1
13043
13044         if updatecache_flg and  \
13045                 myaction != "metadata" and \
13046                 "metadata-transfer" not in settings.features:
13047                 updatecache_flg = False
13048
13049         # Reload the whole config from scratch.
13050         settings, trees, mtimedb = load_emerge_config(trees=trees)
13051         root_config = trees[settings["ROOT"]]["root_config"]
13052         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13053
13054         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13055                 action_metadata(settings, portdb, myopts)
13056
13057         if portage._global_updates(trees, mtimedb["updates"]):
13058                 mtimedb.commit()
13059                 # Reload the whole config from scratch.
13060                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13061                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13062                 root_config = trees[settings["ROOT"]]["root_config"]
13063
13064         mybestpv = portdb.xmatch("bestmatch-visible",
13065                 portage.const.PORTAGE_PACKAGE_ATOM)
13066         mypvs = portage.best(
13067                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13068                 portage.const.PORTAGE_PACKAGE_ATOM))
13069
13070         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13071
13072         if myaction != "metadata":
13073                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13074                         retval = portage.process.spawn(
13075                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13076                                 dosyncuri], env=settings.environ())
13077                         if retval != os.EX_OK:
13078                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13079
13080         if(mybestpv != mypvs) and not "--quiet" in myopts:
13081                 print
13082                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13083                 print red(" * ")+"that you update portage now, before any other packages are updated."
13084                 print
13085                 print red(" * ")+"To update portage, run 'emerge portage' now."
13086                 print
13087         
13088         display_news_notification(root_config, myopts)
13089         return os.EX_OK
13090
13091 def git_sync_timestamps(settings, portdir):
13092         """
13093         Since git doesn't preserve timestamps, synchronize timestamps between
13094         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13095         for a given file as long as the file in the working tree is not modified
13096         (relative to HEAD).
13097         """
13098         cache_dir = os.path.join(portdir, "metadata", "cache")
13099         if not os.path.isdir(cache_dir):
13100                 return os.EX_OK
13101         writemsg_level(">>> Synchronizing timestamps...\n")
13102
13103         from portage.cache.cache_errors import CacheError
13104         try:
13105                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13106                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13107         except CacheError, e:
13108                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13109                         level=logging.ERROR, noiselevel=-1)
13110                 return 1
13111
13112         ec_dir = os.path.join(portdir, "eclass")
13113         try:
13114                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13115                         if f.endswith(".eclass"))
13116         except OSError, e:
13117                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13118                         level=logging.ERROR, noiselevel=-1)
13119                 return 1
13120
13121         args = [portage.const.BASH_BINARY, "-c",
13122                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13123                 portage._shell_quote(portdir)]
13124         import subprocess
13125         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13126         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13127         rval = proc.wait()
13128         if rval != os.EX_OK:
13129                 return rval
13130
13131         modified_eclasses = set(ec for ec in ec_names \
13132                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13133
13134         updated_ec_mtimes = {}
13135
13136         for cpv in cache_db:
13137                 cpv_split = portage.catpkgsplit(cpv)
13138                 if cpv_split is None:
13139                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13140                                 level=logging.ERROR, noiselevel=-1)
13141                         continue
13142
13143                 cat, pn, ver, rev = cpv_split
13144                 cat, pf = portage.catsplit(cpv)
13145                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13146                 if relative_eb_path in modified_files:
13147                         continue
13148
13149                 try:
13150                         cache_entry = cache_db[cpv]
13151                         eb_mtime = cache_entry.get("_mtime_")
13152                         ec_mtimes = cache_entry.get("_eclasses_")
13153                 except KeyError:
13154                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13155                                 level=logging.ERROR, noiselevel=-1)
13156                         continue
13157                 except CacheError, e:
13158                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13159                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13160                         continue
13161
13162                 if eb_mtime is None:
13163                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13164                                 level=logging.ERROR, noiselevel=-1)
13165                         continue
13166
13167                 try:
13168                         eb_mtime = long(eb_mtime)
13169                 except ValueError:
13170                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13171                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13172                         continue
13173
13174                 if ec_mtimes is None:
13175                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13176                                 level=logging.ERROR, noiselevel=-1)
13177                         continue
13178
13179                 if modified_eclasses.intersection(ec_mtimes):
13180                         continue
13181
13182                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13183                 if missing_eclasses:
13184                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13185                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13186                                 noiselevel=-1)
13187                         continue
13188
13189                 eb_path = os.path.join(portdir, relative_eb_path)
13190                 try:
13191                         current_eb_mtime = os.stat(eb_path)
13192                 except OSError:
13193                         writemsg_level("!!! Missing ebuild: %s\n" % \
13194                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13195                         continue
13196
13197                 inconsistent = False
13198                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13199                         updated_mtime = updated_ec_mtimes.get(ec)
13200                         if updated_mtime is not None and updated_mtime != ec_mtime:
13201                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13202                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13203                                 inconsistent = True
13204                                 break
13205
13206                 if inconsistent:
13207                         continue
13208
13209                 if current_eb_mtime != eb_mtime:
13210                         os.utime(eb_path, (eb_mtime, eb_mtime))
13211
13212                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13213                         if ec in updated_ec_mtimes:
13214                                 continue
13215                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13216                         current_mtime = long(os.stat(ec_path).st_mtime)
13217                         if current_mtime != ec_mtime:
13218                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13219                         updated_ec_mtimes[ec] = ec_mtime
13220
13221         return os.EX_OK
13222
13223 def action_metadata(settings, portdb, myopts):
13224         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13225         old_umask = os.umask(0002)
13226         cachedir = os.path.normpath(settings.depcachedir)
13227         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13228                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13229                                         "/sys", "/tmp", "/usr",  "/var"]:
13230                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13231                         "ROOT DIRECTORY ON YOUR SYSTEM."
13232                 print >> sys.stderr, \
13233                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13234                 sys.exit(73)
13235         if not os.path.exists(cachedir):
13236                 os.mkdir(cachedir)
13237
13238         ec = portage.eclass_cache.cache(portdb.porttree_root)
13239         myportdir = os.path.realpath(settings["PORTDIR"])
13240         cm = settings.load_best_module("portdbapi.metadbmodule")(
13241                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13242
13243         from portage.cache import util
13244
13245         class percentage_noise_maker(util.quiet_mirroring):
13246                 def __init__(self, dbapi):
13247                         self.dbapi = dbapi
13248                         self.cp_all = dbapi.cp_all()
13249                         l = len(self.cp_all)
13250                         self.call_update_min = 100000000
13251                         self.min_cp_all = l/100.0
13252                         self.count = 1
13253                         self.pstr = ''
13254
13255                 def __iter__(self):
13256                         for x in self.cp_all:
13257                                 self.count += 1
13258                                 if self.count > self.min_cp_all:
13259                                         self.call_update_min = 0
13260                                         self.count = 0
13261                                 for y in self.dbapi.cp_list(x):
13262                                         yield y
13263                         self.call_update_mine = 0
13264
13265                 def update(self, *arg):
13266                         try:
13267                                 self.pstr = int(self.pstr) + 1
13268                         except ValueError:
13269                                 self.pstr = 1
13270                         sys.stdout.write("%s%i%%" % \
13271                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13272                         sys.stdout.flush()
13273                         self.call_update_min = 10000000
13274
13275                 def finish(self, *arg):
13276                         sys.stdout.write("\b\b\b\b100%\n")
13277                         sys.stdout.flush()
13278
13279         if "--quiet" in myopts:
13280                 def quicky_cpv_generator(cp_all_list):
13281                         for x in cp_all_list:
13282                                 for y in portdb.cp_list(x):
13283                                         yield y
13284                 source = quicky_cpv_generator(portdb.cp_all())
13285                 noise_maker = portage.cache.util.quiet_mirroring()
13286         else:
13287                 noise_maker = source = percentage_noise_maker(portdb)
13288         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13289                 eclass_cache=ec, verbose_instance=noise_maker)
13290
13291         sys.stdout.flush()
13292         os.umask(old_umask)
13293
13294 def action_regen(settings, portdb, max_jobs, max_load):
13295         xterm_titles = "notitles" not in settings.features
13296         emergelog(xterm_titles, " === regen")
13297         #regenerate cache entries
13298         portage.writemsg_stdout("Regenerating cache entries...\n")
13299         try:
13300                 os.close(sys.stdin.fileno())
13301         except SystemExit, e:
13302                 raise # Needed else can't exit
13303         except:
13304                 pass
13305         sys.stdout.flush()
13306
13307         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13308         regen.run()
13309
13310         portage.writemsg_stdout("done!\n")
13311         return regen.returncode
13312
13313 def action_config(settings, trees, myopts, myfiles):
13314         if len(myfiles) != 1:
13315                 print red("!!! config can only take a single package atom at this time\n")
13316                 sys.exit(1)
13317         if not is_valid_package_atom(myfiles[0]):
13318                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13319                         noiselevel=-1)
13320                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13321                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13322                 sys.exit(1)
13323         print
13324         try:
13325                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13326         except portage.exception.AmbiguousPackageName, e:
13327                 # Multiple matches thrown from cpv_expand
13328                 pkgs = e.args[0]
13329         if len(pkgs) == 0:
13330                 print "No packages found.\n"
13331                 sys.exit(0)
13332         elif len(pkgs) > 1:
13333                 if "--ask" in myopts:
13334                         options = []
13335                         print "Please select a package to configure:"
13336                         idx = 0
13337                         for pkg in pkgs:
13338                                 idx += 1
13339                                 options.append(str(idx))
13340                                 print options[-1]+") "+pkg
13341                         print "X) Cancel"
13342                         options.append("X")
13343                         idx = userquery("Selection?", options)
13344                         if idx == "X":
13345                                 sys.exit(0)
13346                         pkg = pkgs[int(idx)-1]
13347                 else:
13348                         print "The following packages available:"
13349                         for pkg in pkgs:
13350                                 print "* "+pkg
13351                         print "\nPlease use a specific atom or the --ask option."
13352                         sys.exit(1)
13353         else:
13354                 pkg = pkgs[0]
13355
13356         print
13357         if "--ask" in myopts:
13358                 if userquery("Ready to configure "+pkg+"?") == "No":
13359                         sys.exit(0)
13360         else:
13361                 print "Configuring pkg..."
13362         print
13363         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13364         mysettings = portage.config(clone=settings)
13365         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13366         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13367         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13368                 mysettings,
13369                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13370                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13371         if retval == os.EX_OK:
13372                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13373                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13374         print
13375
13376 def action_info(settings, trees, myopts, myfiles):
13377         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13378                 settings.profile_path, settings["CHOST"],
13379                 trees[settings["ROOT"]]["vartree"].dbapi)
13380         header_width = 65
13381         header_title = "System Settings"
13382         if myfiles:
13383                 print header_width * "="
13384                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13385         print header_width * "="
13386         print "System uname: "+platform.platform(aliased=1)
13387
13388         lastSync = portage.grabfile(os.path.join(
13389                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13390         print "Timestamp of tree:",
13391         if lastSync:
13392                 print lastSync[0]
13393         else:
13394                 print "Unknown"
13395
13396         output=commands.getstatusoutput("distcc --version")
13397         if not output[0]:
13398                 print str(output[1].split("\n",1)[0]),
13399                 if "distcc" in settings.features:
13400                         print "[enabled]"
13401                 else:
13402                         print "[disabled]"
13403
13404         output=commands.getstatusoutput("ccache -V")
13405         if not output[0]:
13406                 print str(output[1].split("\n",1)[0]),
13407                 if "ccache" in settings.features:
13408                         print "[enabled]"
13409                 else:
13410                         print "[disabled]"
13411
13412         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13413                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13414         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13415         myvars  = portage.util.unique_array(myvars)
13416         myvars.sort()
13417
13418         for x in myvars:
13419                 if portage.isvalidatom(x):
13420                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13421                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13422                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13423                         pkgs = []
13424                         for pn, ver, rev in pkg_matches:
13425                                 if rev != "r0":
13426                                         pkgs.append(ver + "-" + rev)
13427                                 else:
13428                                         pkgs.append(ver)
13429                         if pkgs:
13430                                 pkgs = ", ".join(pkgs)
13431                                 print "%-20s %s" % (x+":", pkgs)
13432                 else:
13433                         print "%-20s %s" % (x+":", "[NOT VALID]")
13434
13435         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13436
13437         if "--verbose" in myopts:
13438                 myvars=settings.keys()
13439         else:
13440                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13441                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13442                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13443                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13444
13445                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13446
13447         myvars = portage.util.unique_array(myvars)
13448         unset_vars = []
13449         myvars.sort()
13450         for x in myvars:
13451                 if x in settings:
13452                         if x != "USE":
13453                                 print '%s="%s"' % (x, settings[x])
13454                         else:
13455                                 use = set(settings["USE"].split())
13456                                 use_expand = settings["USE_EXPAND"].split()
13457                                 use_expand.sort()
13458                                 for varname in use_expand:
13459                                         flag_prefix = varname.lower() + "_"
13460                                         for f in list(use):
13461                                                 if f.startswith(flag_prefix):
13462                                                         use.remove(f)
13463                                 use = list(use)
13464                                 use.sort()
13465                                 print 'USE="%s"' % " ".join(use),
13466                                 for varname in use_expand:
13467                                         myval = settings.get(varname)
13468                                         if myval:
13469                                                 print '%s="%s"' % (varname, myval),
13470                                 print
13471                 else:
13472                         unset_vars.append(x)
13473         if unset_vars:
13474                 print "Unset:  "+", ".join(unset_vars)
13475         print
13476
13477         if "--debug" in myopts:
13478                 for x in dir(portage):
13479                         module = getattr(portage, x)
13480                         if "cvs_id_string" in dir(module):
13481                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13482
13483         # See if we can find any packages installed matching the strings
13484         # passed on the command line
13485         mypkgs = []
13486         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13487         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13488         for x in myfiles:
13489                 mypkgs.extend(vardb.match(x))
13490
13491         # If some packages were found...
13492         if mypkgs:
13493                 # Get our global settings (we only print stuff if it varies from
13494                 # the current config)
13495                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13496                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13497                 global_vals = {}
13498                 pkgsettings = portage.config(clone=settings)
13499
13500                 for myvar in mydesiredvars:
13501                         global_vals[myvar] = set(settings.get(myvar, "").split())
13502
13503                 # Loop through each package
13504                 # Only print settings if they differ from global settings
13505                 header_title = "Package Settings"
13506                 print header_width * "="
13507                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13508                 print header_width * "="
13509                 from portage.output import EOutput
13510                 out = EOutput()
13511                 for pkg in mypkgs:
13512                         # Get all package specific variables
13513                         auxvalues = vardb.aux_get(pkg, auxkeys)
13514                         valuesmap = {}
13515                         for i in xrange(len(auxkeys)):
13516                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13517                         diff_values = {}
13518                         for myvar in mydesiredvars:
13519                                 # If the package variable doesn't match the
13520                                 # current global variable, something has changed
13521                                 # so set diff_found so we know to print
13522                                 if valuesmap[myvar] != global_vals[myvar]:
13523                                         diff_values[myvar] = valuesmap[myvar]
13524                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13525                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13526                         pkgsettings.reset()
13527                         # If a matching ebuild is no longer available in the tree, maybe it
13528                         # would make sense to compare against the flags for the best
13529                         # available version with the same slot?
13530                         mydb = None
13531                         if portdb.cpv_exists(pkg):
13532                                 mydb = portdb
13533                         pkgsettings.setcpv(pkg, mydb=mydb)
13534                         if valuesmap["IUSE"].intersection(
13535                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13536                                 diff_values["USE"] = valuesmap["USE"]
13537                         # If a difference was found, print the info for
13538                         # this package.
13539                         if diff_values:
13540                                 # Print package info
13541                                 print "%s was built with the following:" % pkg
13542                                 for myvar in mydesiredvars + ["USE"]:
13543                                         if myvar in diff_values:
13544                                                 mylist = list(diff_values[myvar])
13545                                                 mylist.sort()
13546                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13547                                 print
13548                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13549                         ebuildpath = vardb.findname(pkg)
13550                         if not ebuildpath or not os.path.exists(ebuildpath):
13551                                 out.ewarn("No ebuild found for '%s'" % pkg)
13552                                 continue
13553                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13554                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13555                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13556                                 tree="vartree")
13557
13558 def action_search(root_config, myopts, myfiles, spinner):
13559         if not myfiles:
13560                 print "emerge: no search terms provided."
13561         else:
13562                 searchinstance = search(root_config,
13563                         spinner, "--searchdesc" in myopts,
13564                         "--quiet" not in myopts, "--usepkg" in myopts,
13565                         "--usepkgonly" in myopts)
13566                 for mysearch in myfiles:
13567                         try:
13568                                 searchinstance.execute(mysearch)
13569                         except re.error, comment:
13570                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13571                                 sys.exit(1)
13572                         searchinstance.output()
13573
13574 def action_depclean(settings, trees, ldpath_mtimes,
13575         myopts, action, myfiles, spinner):
13576         # Kill packages that aren't explicitly merged or are required as a
13577         # dependency of another package. World file is explicit.
13578
13579         # Global depclean or prune operations are not very safe when there are
13580         # missing dependencies since it's unknown how badly incomplete
13581         # the dependency graph is, and we might accidentally remove packages
13582         # that should have been pulled into the graph. On the other hand, it's
13583         # relatively safe to ignore missing deps when only asked to remove
13584         # specific packages.
13585         allow_missing_deps = len(myfiles) > 0
13586
13587         msg = []
13588         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13589         msg.append("mistakes. Packages that are part of the world set will always\n")
13590         msg.append("be kept.  They can be manually added to this set with\n")
13591         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13592         msg.append("package.provided (see portage(5)) will be removed by\n")
13593         msg.append("depclean, even if they are part of the world set.\n")
13594         msg.append("\n")
13595         msg.append("As a safety measure, depclean will not remove any packages\n")
13596         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13597         msg.append("consequence, it is often necessary to run %s\n" % \
13598                 good("`emerge --update"))
13599         msg.append(good("--newuse --deep @system @world`") + \
13600                 " prior to depclean.\n")
13601
13602         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13603                 portage.writemsg_stdout("\n")
13604                 for x in msg:
13605                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13606
13607         xterm_titles = "notitles" not in settings.features
13608         myroot = settings["ROOT"]
13609         root_config = trees[myroot]["root_config"]
13610         getSetAtoms = root_config.setconfig.getSetAtoms
13611         vardb = trees[myroot]["vartree"].dbapi
13612
13613         required_set_names = ("system", "world")
13614         required_sets = {}
13615         set_args = []
13616
13617         for s in required_set_names:
13618                 required_sets[s] = InternalPackageSet(
13619                         initial_atoms=getSetAtoms(s))
13620
13621         
13622         # When removing packages, use a temporary version of world
13623         # which excludes packages that are intended to be eligible for
13624         # removal.
13625         world_temp_set = required_sets["world"]
13626         system_set = required_sets["system"]
13627
13628         if not system_set or not world_temp_set:
13629
13630                 if not system_set:
13631                         writemsg_level("!!! You have no system list.\n",
13632                                 level=logging.ERROR, noiselevel=-1)
13633
13634                 if not world_temp_set:
13635                         writemsg_level("!!! You have no world file.\n",
13636                                         level=logging.WARNING, noiselevel=-1)
13637
13638                 writemsg_level("!!! Proceeding is likely to " + \
13639                         "break your installation.\n",
13640                         level=logging.WARNING, noiselevel=-1)
13641                 if "--pretend" not in myopts:
13642                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13643
13644         if action == "depclean":
13645                 emergelog(xterm_titles, " >>> depclean")
13646
13647         import textwrap
13648         args_set = InternalPackageSet()
13649         if myfiles:
13650                 for x in myfiles:
13651                         if not is_valid_package_atom(x):
13652                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13653                                         level=logging.ERROR, noiselevel=-1)
13654                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13655                                 return
13656                         try:
13657                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13658                         except portage.exception.AmbiguousPackageName, e:
13659                                 msg = "The short ebuild name \"" + x + \
13660                                         "\" is ambiguous.  Please specify " + \
13661                                         "one of the following " + \
13662                                         "fully-qualified ebuild names instead:"
13663                                 for line in textwrap.wrap(msg, 70):
13664                                         writemsg_level("!!! %s\n" % (line,),
13665                                                 level=logging.ERROR, noiselevel=-1)
13666                                 for i in e[0]:
13667                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13668                                                 level=logging.ERROR, noiselevel=-1)
13669                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13670                                 return
13671                         args_set.add(atom)
13672                 matched_packages = False
13673                 for x in args_set:
13674                         if vardb.match(x):
13675                                 matched_packages = True
13676                                 break
13677                 if not matched_packages:
13678                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13679                                 action)
13680                         return
13681
13682         writemsg_level("\nCalculating dependencies  ")
13683         resolver_params = create_depgraph_params(myopts, "remove")
13684         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13685         vardb = resolver.trees[myroot]["vartree"].dbapi
13686
13687         if action == "depclean":
13688
13689                 if args_set:
13690                         # Pull in everything that's installed but not matched
13691                         # by an argument atom since we don't want to clean any
13692                         # package if something depends on it.
13693
13694                         world_temp_set.clear()
13695                         for pkg in vardb:
13696                                 spinner.update()
13697
13698                                 try:
13699                                         if args_set.findAtomForPackage(pkg) is None:
13700                                                 world_temp_set.add("=" + pkg.cpv)
13701                                                 continue
13702                                 except portage.exception.InvalidDependString, e:
13703                                         show_invalid_depstring_notice(pkg,
13704                                                 pkg.metadata["PROVIDE"], str(e))
13705                                         del e
13706                                         world_temp_set.add("=" + pkg.cpv)
13707                                         continue
13708
13709         elif action == "prune":
13710
13711                 # Pull in everything that's installed since we don't
13712                 # to prune a package if something depends on it.
13713                 world_temp_set.clear()
13714                 world_temp_set.update(vardb.cp_all())
13715
13716                 if not args_set:
13717
13718                         # Try to prune everything that's slotted.
13719                         for cp in vardb.cp_all():
13720                                 if len(vardb.cp_list(cp)) > 1:
13721                                         args_set.add(cp)
13722
13723                 # Remove atoms from world that match installed packages
13724                 # that are also matched by argument atoms, but do not remove
13725                 # them if they match the highest installed version.
13726                 for pkg in vardb:
13727                         spinner.update()
13728                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13729                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13730                                 raise AssertionError("package expected in matches: " + \
13731                                         "cp = %s, cpv = %s matches = %s" % \
13732                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13733
13734                         highest_version = pkgs_for_cp[-1]
13735                         if pkg == highest_version:
13736                                 # pkg is the highest version
13737                                 world_temp_set.add("=" + pkg.cpv)
13738                                 continue
13739
13740                         if len(pkgs_for_cp) <= 1:
13741                                 raise AssertionError("more packages expected: " + \
13742                                         "cp = %s, cpv = %s matches = %s" % \
13743                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13744
13745                         try:
13746                                 if args_set.findAtomForPackage(pkg) is None:
13747                                         world_temp_set.add("=" + pkg.cpv)
13748                                         continue
13749                         except portage.exception.InvalidDependString, e:
13750                                 show_invalid_depstring_notice(pkg,
13751                                         pkg.metadata["PROVIDE"], str(e))
13752                                 del e
13753                                 world_temp_set.add("=" + pkg.cpv)
13754                                 continue
13755
13756         set_args = {}
13757         for s, package_set in required_sets.iteritems():
13758                 set_atom = SETPREFIX + s
13759                 set_arg = SetArg(arg=set_atom, set=package_set,
13760                         root_config=resolver.roots[myroot])
13761                 set_args[s] = set_arg
13762                 for atom in set_arg.set:
13763                         resolver._dep_stack.append(
13764                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13765                         resolver.digraph.add(set_arg, None)
13766
13767         success = resolver._complete_graph()
13768         writemsg_level("\b\b... done!\n")
13769
13770         resolver.display_problems()
13771
13772         if not success:
13773                 return 1
13774
13775         def unresolved_deps():
13776
13777                 unresolvable = set()
13778                 for dep in resolver._initially_unsatisfied_deps:
13779                         if isinstance(dep.parent, Package) and \
13780                                 (dep.priority > UnmergeDepPriority.SOFT):
13781                                 unresolvable.add((dep.atom, dep.parent.cpv))
13782
13783                 if not unresolvable:
13784                         return False
13785
13786                 if unresolvable and not allow_missing_deps:
13787                         prefix = bad(" * ")
13788                         msg = []
13789                         msg.append("Dependencies could not be completely resolved due to")
13790                         msg.append("the following required packages not being installed:")
13791                         msg.append("")
13792                         for atom, parent in unresolvable:
13793                                 msg.append("  %s pulled in by:" % (atom,))
13794                                 msg.append("    %s" % (parent,))
13795                                 msg.append("")
13796                         msg.append("Have you forgotten to run " + \
13797                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13798                         msg.append(("to %s? It may be necessary to manually " + \
13799                                 "uninstall packages that no longer") % action)
13800                         msg.append("exist in the portage tree since " + \
13801                                 "it may not be possible to satisfy their")
13802                         msg.append("dependencies.  Also, be aware of " + \
13803                                 "the --with-bdeps option that is documented")
13804                         msg.append("in " + good("`man emerge`") + ".")
13805                         if action == "prune":
13806                                 msg.append("")
13807                                 msg.append("If you would like to ignore " + \
13808                                         "dependencies then use %s." % good("--nodeps"))
13809                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13810                                 level=logging.ERROR, noiselevel=-1)
13811                         return True
13812                 return False
13813
13814         if unresolved_deps():
13815                 return 1
13816
13817         graph = resolver.digraph.copy()
13818         required_pkgs_total = 0
13819         for node in graph:
13820                 if isinstance(node, Package):
13821                         required_pkgs_total += 1
13822
13823         def show_parents(child_node):
13824                 parent_nodes = graph.parent_nodes(child_node)
13825                 if not parent_nodes:
13826                         # With --prune, the highest version can be pulled in without any
13827                         # real parent since all installed packages are pulled in.  In that
13828                         # case there's nothing to show here.
13829                         return
13830                 parent_strs = []
13831                 for node in parent_nodes:
13832                         parent_strs.append(str(getattr(node, "cpv", node)))
13833                 parent_strs.sort()
13834                 msg = []
13835                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13836                 for parent_str in parent_strs:
13837                         msg.append("    %s\n" % (parent_str,))
13838                 msg.append("\n")
13839                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13840
13841         def cmp_pkg_cpv(pkg1, pkg2):
13842                 """Sort Package instances by cpv."""
13843                 if pkg1.cpv > pkg2.cpv:
13844                         return 1
13845                 elif pkg1.cpv == pkg2.cpv:
13846                         return 0
13847                 else:
13848                         return -1
13849
13850         def create_cleanlist():
13851                 pkgs_to_remove = []
13852
13853                 if action == "depclean":
13854                         if args_set:
13855
13856                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13857                                         arg_atom = None
13858                                         try:
13859                                                 arg_atom = args_set.findAtomForPackage(pkg)
13860                                         except portage.exception.InvalidDependString:
13861                                                 # this error has already been displayed by now
13862                                                 continue
13863
13864                                         if arg_atom:
13865                                                 if pkg not in graph:
13866                                                         pkgs_to_remove.append(pkg)
13867                                                 elif "--verbose" in myopts:
13868                                                         show_parents(pkg)
13869
13870                         else:
13871                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13872                                         if pkg not in graph:
13873                                                 pkgs_to_remove.append(pkg)
13874                                         elif "--verbose" in myopts:
13875                                                 show_parents(pkg)
13876
13877                 elif action == "prune":
13878                         # Prune really uses all installed instead of world. It's not
13879                         # a real reverse dependency so don't display it as such.
13880                         graph.remove(set_args["world"])
13881
13882                         for atom in args_set:
13883                                 for pkg in vardb.match_pkgs(atom):
13884                                         if pkg not in graph:
13885                                                 pkgs_to_remove.append(pkg)
13886                                         elif "--verbose" in myopts:
13887                                                 show_parents(pkg)
13888
13889                 if not pkgs_to_remove:
13890                         writemsg_level(
13891                                 ">>> No packages selected for removal by %s\n" % action)
13892                         if "--verbose" not in myopts:
13893                                 writemsg_level(
13894                                         ">>> To see reverse dependencies, use %s\n" % \
13895                                                 good("--verbose"))
13896                         if action == "prune":
13897                                 writemsg_level(
13898                                         ">>> To ignore dependencies, use %s\n" % \
13899                                                 good("--nodeps"))
13900
13901                 return pkgs_to_remove
13902
13903         cleanlist = create_cleanlist()
13904
13905         if len(cleanlist):
13906                 clean_set = set(cleanlist)
13907
13908                 # Check if any of these package are the sole providers of libraries
13909                 # with consumers that have not been selected for removal. If so, these
13910                 # packages and any dependencies need to be added to the graph.
13911                 real_vardb = trees[myroot]["vartree"].dbapi
13912                 linkmap = real_vardb.linkmap
13913                 liblist = linkmap.listLibraryObjects()
13914                 consumer_cache = {}
13915                 provider_cache = {}
13916                 soname_cache = {}
13917                 consumer_map = {}
13918
13919                 writemsg_level(">>> Checking for lib consumers...\n")
13920
13921                 for pkg in cleanlist:
13922                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13923                         provided_libs = set()
13924
13925                         for lib in liblist:
13926                                 if pkg_dblink.isowner(lib, myroot):
13927                                         provided_libs.add(lib)
13928
13929                         if not provided_libs:
13930                                 continue
13931
13932                         consumers = {}
13933                         for lib in provided_libs:
13934                                 lib_consumers = consumer_cache.get(lib)
13935                                 if lib_consumers is None:
13936                                         lib_consumers = linkmap.findConsumers(lib)
13937                                         consumer_cache[lib] = lib_consumers
13938                                 if lib_consumers:
13939                                         consumers[lib] = lib_consumers
13940
13941                         if not consumers:
13942                                 continue
13943
13944                         for lib, lib_consumers in consumers.items():
13945                                 for consumer_file in list(lib_consumers):
13946                                         if pkg_dblink.isowner(consumer_file, myroot):
13947                                                 lib_consumers.remove(consumer_file)
13948                                 if not lib_consumers:
13949                                         del consumers[lib]
13950
13951                         if not consumers:
13952                                 continue
13953
13954                         for lib, lib_consumers in consumers.iteritems():
13955
13956                                 soname = soname_cache.get(lib)
13957                                 if soname is None:
13958                                         soname = linkmap.getSoname(lib)
13959                                         soname_cache[lib] = soname
13960
13961                                 consumer_providers = []
13962                                 for lib_consumer in lib_consumers:
13963                                         providers = provider_cache.get(lib)
13964                                         if providers is None:
13965                                                 providers = linkmap.findProviders(lib_consumer)
13966                                                 provider_cache[lib_consumer] = providers
13967                                         if soname not in providers:
13968                                                 # Why does this happen?
13969                                                 continue
13970                                         consumer_providers.append(
13971                                                 (lib_consumer, providers[soname]))
13972
13973                                 consumers[lib] = consumer_providers
13974
13975                         consumer_map[pkg] = consumers
13976
13977                 if consumer_map:
13978
13979                         search_files = set()
13980                         for consumers in consumer_map.itervalues():
13981                                 for lib, consumer_providers in consumers.iteritems():
13982                                         for lib_consumer, providers in consumer_providers:
13983                                                 search_files.add(lib_consumer)
13984                                                 search_files.update(providers)
13985
13986                         writemsg_level(">>> Assigning files to packages...\n")
13987                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13988
13989                         for pkg, consumers in consumer_map.items():
13990                                 for lib, consumer_providers in consumers.items():
13991                                         lib_consumers = set()
13992
13993                                         for lib_consumer, providers in consumer_providers:
13994                                                 owner_set = file_owners.get(lib_consumer)
13995                                                 provider_dblinks = set()
13996                                                 provider_pkgs = set()
13997
13998                                                 if len(providers) > 1:
13999                                                         for provider in providers:
14000                                                                 provider_set = file_owners.get(provider)
14001                                                                 if provider_set is not None:
14002                                                                         provider_dblinks.update(provider_set)
14003
14004                                                 if len(provider_dblinks) > 1:
14005                                                         for provider_dblink in provider_dblinks:
14006                                                                 pkg_key = ("installed", myroot,
14007                                                                         provider_dblink.mycpv, "nomerge")
14008                                                                 if pkg_key not in clean_set:
14009                                                                         provider_pkgs.add(vardb.get(pkg_key))
14010
14011                                                 if provider_pkgs:
14012                                                         continue
14013
14014                                                 if owner_set is not None:
14015                                                         lib_consumers.update(owner_set)
14016
14017                                         for consumer_dblink in list(lib_consumers):
14018                                                 if ("installed", myroot, consumer_dblink.mycpv,
14019                                                         "nomerge") in clean_set:
14020                                                         lib_consumers.remove(consumer_dblink)
14021                                                         continue
14022
14023                                         if lib_consumers:
14024                                                 consumers[lib] = lib_consumers
14025                                         else:
14026                                                 del consumers[lib]
14027                                 if not consumers:
14028                                         del consumer_map[pkg]
14029
14030                 if consumer_map:
14031                         # TODO: Implement a package set for rebuilding consumer packages.
14032
14033                         msg = "In order to avoid breakage of link level " + \
14034                                 "dependencies, one or more packages will not be removed. " + \
14035                                 "This can be solved by rebuilding " + \
14036                                 "the packages that pulled them in."
14037
14038                         prefix = bad(" * ")
14039                         from textwrap import wrap
14040                         writemsg_level("".join(prefix + "%s\n" % line for \
14041                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14042
14043                         msg = []
14044                         for pkg, consumers in consumer_map.iteritems():
14045                                 unique_consumers = set(chain(*consumers.values()))
14046                                 unique_consumers = sorted(consumer.mycpv \
14047                                         for consumer in unique_consumers)
14048                                 msg.append("")
14049                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14050                                 for consumer in unique_consumers:
14051                                         msg.append("    %s" % (consumer,))
14052                         msg.append("")
14053                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14054                                 level=logging.WARNING, noiselevel=-1)
14055
14056                         # Add lib providers to the graph as children of lib consumers,
14057                         # and also add any dependencies pulled in by the provider.
14058                         writemsg_level(">>> Adding lib providers to graph...\n")
14059
14060                         for pkg, consumers in consumer_map.iteritems():
14061                                 for consumer_dblink in set(chain(*consumers.values())):
14062                                         consumer_pkg = vardb.get(("installed", myroot,
14063                                                 consumer_dblink.mycpv, "nomerge"))
14064                                         if not resolver._add_pkg(pkg,
14065                                                 Dependency(parent=consumer_pkg,
14066                                                 priority=UnmergeDepPriority(runtime=True),
14067                                                 root=pkg.root)):
14068                                                 resolver.display_problems()
14069                                                 return 1
14070
14071                         writemsg_level("\nCalculating dependencies  ")
14072                         success = resolver._complete_graph()
14073                         writemsg_level("\b\b... done!\n")
14074                         resolver.display_problems()
14075                         if not success:
14076                                 return 1
14077                         if unresolved_deps():
14078                                 return 1
14079
14080                         graph = resolver.digraph.copy()
14081                         required_pkgs_total = 0
14082                         for node in graph:
14083                                 if isinstance(node, Package):
14084                                         required_pkgs_total += 1
14085                         cleanlist = create_cleanlist()
14086                         if not cleanlist:
14087                                 return 0
14088                         clean_set = set(cleanlist)
14089
14090                 # Use a topological sort to create an unmerge order such that
14091                 # each package is unmerged before it's dependencies. This is
14092                 # necessary to avoid breaking things that may need to run
14093                 # during pkg_prerm or pkg_postrm phases.
14094
14095                 # Create a new graph to account for dependencies between the
14096                 # packages being unmerged.
14097                 graph = digraph()
14098                 del cleanlist[:]
14099
14100                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14101                 runtime = UnmergeDepPriority(runtime=True)
14102                 runtime_post = UnmergeDepPriority(runtime_post=True)
14103                 buildtime = UnmergeDepPriority(buildtime=True)
14104                 priority_map = {
14105                         "RDEPEND": runtime,
14106                         "PDEPEND": runtime_post,
14107                         "DEPEND": buildtime,
14108                 }
14109
14110                 for node in clean_set:
14111                         graph.add(node, None)
14112                         mydeps = []
14113                         node_use = node.metadata["USE"].split()
14114                         for dep_type in dep_keys:
14115                                 depstr = node.metadata[dep_type]
14116                                 if not depstr:
14117                                         continue
14118                                 try:
14119                                         portage.dep._dep_check_strict = False
14120                                         success, atoms = portage.dep_check(depstr, None, settings,
14121                                                 myuse=node_use, trees=resolver._graph_trees,
14122                                                 myroot=myroot)
14123                                 finally:
14124                                         portage.dep._dep_check_strict = True
14125                                 if not success:
14126                                         # Ignore invalid deps of packages that will
14127                                         # be uninstalled anyway.
14128                                         continue
14129
14130                                 priority = priority_map[dep_type]
14131                                 for atom in atoms:
14132                                         if not isinstance(atom, portage.dep.Atom):
14133                                                 # Ignore invalid atoms returned from dep_check().
14134                                                 continue
14135                                         if atom.blocker:
14136                                                 continue
14137                                         matches = vardb.match_pkgs(atom)
14138                                         if not matches:
14139                                                 continue
14140                                         for child_node in matches:
14141                                                 if child_node in clean_set:
14142                                                         graph.add(child_node, node, priority=priority)
14143
14144                 ordered = True
14145                 if len(graph.order) == len(graph.root_nodes()):
14146                         # If there are no dependencies between packages
14147                         # let unmerge() group them by cat/pn.
14148                         ordered = False
14149                         cleanlist = [pkg.cpv for pkg in graph.order]
14150                 else:
14151                         # Order nodes from lowest to highest overall reference count for
14152                         # optimal root node selection.
14153                         node_refcounts = {}
14154                         for node in graph.order:
14155                                 node_refcounts[node] = len(graph.parent_nodes(node))
14156                         def cmp_reference_count(node1, node2):
14157                                 return node_refcounts[node1] - node_refcounts[node2]
14158                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14159         
14160                         ignore_priority_range = [None]
14161                         ignore_priority_range.extend(
14162                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14163                         while not graph.empty():
14164                                 for ignore_priority in ignore_priority_range:
14165                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14166                                         if nodes:
14167                                                 break
14168                                 if not nodes:
14169                                         raise AssertionError("no root nodes")
14170                                 if ignore_priority is not None:
14171                                         # Some deps have been dropped due to circular dependencies,
14172                                         # so only pop one node in order do minimize the number that
14173                                         # are dropped.
14174                                         del nodes[1:]
14175                                 for node in nodes:
14176                                         graph.remove(node)
14177                                         cleanlist.append(node.cpv)
14178
14179                 unmerge(root_config, myopts, "unmerge", cleanlist,
14180                         ldpath_mtimes, ordered=ordered)
14181
14182         if action == "prune":
14183                 return
14184
14185         if not cleanlist and "--quiet" in myopts:
14186                 return
14187
14188         print "Packages installed:   "+str(len(vardb.cpv_all()))
14189         print "Packages in world:    " + \
14190                 str(len(root_config.sets["world"].getAtoms()))
14191         print "Packages in system:   " + \
14192                 str(len(root_config.sets["system"].getAtoms()))
14193         print "Required packages:    "+str(required_pkgs_total)
14194         if "--pretend" in myopts:
14195                 print "Number to remove:     "+str(len(cleanlist))
14196         else:
14197                 print "Number removed:       "+str(len(cleanlist))
14198
14199 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14200         """
14201         Construct a depgraph for the given resume list. This will raise
14202         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14203         @rtype: tuple
14204         @returns: (success, depgraph, dropped_tasks)
14205         """
14206         skip_masked = True
14207         skip_unsatisfied = True
14208         mergelist = mtimedb["resume"]["mergelist"]
14209         dropped_tasks = set()
14210         while True:
14211                 mydepgraph = depgraph(settings, trees,
14212                         myopts, myparams, spinner)
14213                 try:
14214                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14215                                 skip_masked=skip_masked)
14216                 except depgraph.UnsatisfiedResumeDep, e:
14217                         if not skip_unsatisfied:
14218                                 raise
14219
14220                         graph = mydepgraph.digraph
14221                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14222                                 for dep in e.value)
14223                         traversed_nodes = set()
14224                         unsatisfied_stack = list(unsatisfied_parents)
14225                         while unsatisfied_stack:
14226                                 pkg = unsatisfied_stack.pop()
14227                                 if pkg in traversed_nodes:
14228                                         continue
14229                                 traversed_nodes.add(pkg)
14230
14231                                 # If this package was pulled in by a parent
14232                                 # package scheduled for merge, removing this
14233                                 # package may cause the the parent package's
14234                                 # dependency to become unsatisfied.
14235                                 for parent_node in graph.parent_nodes(pkg):
14236                                         if not isinstance(parent_node, Package) \
14237                                                 or parent_node.operation not in ("merge", "nomerge"):
14238                                                 continue
14239                                         unsatisfied = \
14240                                                 graph.child_nodes(parent_node,
14241                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14242                                         if pkg in unsatisfied:
14243                                                 unsatisfied_parents[parent_node] = parent_node
14244                                                 unsatisfied_stack.append(parent_node)
14245
14246                         pruned_mergelist = []
14247                         for x in mergelist:
14248                                 if isinstance(x, list) and \
14249                                         tuple(x) not in unsatisfied_parents:
14250                                         pruned_mergelist.append(x)
14251
14252                         # If the mergelist doesn't shrink then this loop is infinite.
14253                         if len(pruned_mergelist) == len(mergelist):
14254                                 # This happens if a package can't be dropped because
14255                                 # it's already installed, but it has unsatisfied PDEPEND.
14256                                 raise
14257                         mergelist[:] = pruned_mergelist
14258
14259                         # Exclude installed packages that have been removed from the graph due
14260                         # to failure to build/install runtime dependencies after the dependent
14261                         # package has already been installed.
14262                         dropped_tasks.update(pkg for pkg in \
14263                                 unsatisfied_parents if pkg.operation != "nomerge")
14264                         mydepgraph.break_refs(unsatisfied_parents)
14265
14266                         del e, graph, traversed_nodes, \
14267                                 unsatisfied_parents, unsatisfied_stack
14268                         continue
14269                 else:
14270                         break
14271         return (success, mydepgraph, dropped_tasks)
14272
14273 def action_build(settings, trees, mtimedb,
14274         myopts, myaction, myfiles, spinner):
14275
14276         # validate the state of the resume data
14277         # so that we can make assumptions later.
14278         for k in ("resume", "resume_backup"):
14279                 if k not in mtimedb:
14280                         continue
14281                 resume_data = mtimedb[k]
14282                 if not isinstance(resume_data, dict):
14283                         del mtimedb[k]
14284                         continue
14285                 mergelist = resume_data.get("mergelist")
14286                 if not isinstance(mergelist, list):
14287                         del mtimedb[k]
14288                         continue
14289                 for x in mergelist:
14290                         if not (isinstance(x, list) and len(x) == 4):
14291                                 continue
14292                         pkg_type, pkg_root, pkg_key, pkg_action = x
14293                         if pkg_root not in trees:
14294                                 # Current $ROOT setting differs,
14295                                 # so the list must be stale.
14296                                 mergelist = None
14297                                 break
14298                 if not mergelist:
14299                         del mtimedb[k]
14300                         continue
14301                 resume_opts = resume_data.get("myopts")
14302                 if not isinstance(resume_opts, (dict, list)):
14303                         del mtimedb[k]
14304                         continue
14305                 favorites = resume_data.get("favorites")
14306                 if not isinstance(favorites, list):
14307                         del mtimedb[k]
14308                         continue
14309
14310         resume = False
14311         if "--resume" in myopts and \
14312                 ("resume" in mtimedb or
14313                 "resume_backup" in mtimedb):
14314                 resume = True
14315                 if "resume" not in mtimedb:
14316                         mtimedb["resume"] = mtimedb["resume_backup"]
14317                         del mtimedb["resume_backup"]
14318                         mtimedb.commit()
14319                 # "myopts" is a list for backward compatibility.
14320                 resume_opts = mtimedb["resume"].get("myopts", [])
14321                 if isinstance(resume_opts, list):
14322                         resume_opts = dict((k,True) for k in resume_opts)
14323                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14324                         resume_opts.pop(opt, None)
14325                 myopts.update(resume_opts)
14326
14327                 if "--debug" in myopts:
14328                         writemsg_level("myopts %s\n" % (myopts,))
14329
14330                 # Adjust config according to options of the command being resumed.
14331                 for myroot in trees:
14332                         mysettings =  trees[myroot]["vartree"].settings
14333                         mysettings.unlock()
14334                         adjust_config(myopts, mysettings)
14335                         mysettings.lock()
14336                         del myroot, mysettings
14337
14338         ldpath_mtimes = mtimedb["ldpath"]
14339         favorites=[]
14340         merge_count = 0
14341         buildpkgonly = "--buildpkgonly" in myopts
14342         pretend = "--pretend" in myopts
14343         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14344         ask = "--ask" in myopts
14345         nodeps = "--nodeps" in myopts
14346         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14347         tree = "--tree" in myopts
14348         if nodeps and tree:
14349                 tree = False
14350                 del myopts["--tree"]
14351                 portage.writemsg(colorize("WARN", " * ") + \
14352                         "--tree is broken with --nodeps. Disabling...\n")
14353         debug = "--debug" in myopts
14354         verbose = "--verbose" in myopts
14355         quiet = "--quiet" in myopts
14356         if pretend or fetchonly:
14357                 # make the mtimedb readonly
14358                 mtimedb.filename = None
14359         if '--digest' in myopts or 'digest' in settings.features:
14360                 if '--digest' in myopts:
14361                         msg = "The --digest option"
14362                 else:
14363                         msg = "The FEATURES=digest setting"
14364
14365                 msg += " can prevent corruption from being" + \
14366                         " noticed. The `repoman manifest` command is the preferred" + \
14367                         " way to generate manifests and it is capable of doing an" + \
14368                         " entire repository or category at once."
14369                 prefix = bad(" * ")
14370                 writemsg(prefix + "\n")
14371                 from textwrap import wrap
14372                 for line in wrap(msg, 72):
14373                         writemsg("%s%s\n" % (prefix, line))
14374                 writemsg(prefix + "\n")
14375
14376         if "--quiet" not in myopts and \
14377                 ("--pretend" in myopts or "--ask" in myopts or \
14378                 "--tree" in myopts or "--verbose" in myopts):
14379                 action = ""
14380                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14381                         action = "fetched"
14382                 elif "--buildpkgonly" in myopts:
14383                         action = "built"
14384                 else:
14385                         action = "merged"
14386                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14387                         print
14388                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14389                         print
14390                 else:
14391                         print
14392                         print darkgreen("These are the packages that would be %s, in order:") % action
14393                         print
14394
14395         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14396         if not show_spinner:
14397                 spinner.update = spinner.update_quiet
14398
14399         if resume:
14400                 favorites = mtimedb["resume"].get("favorites")
14401                 if not isinstance(favorites, list):
14402                         favorites = []
14403
14404                 if show_spinner:
14405                         print "Calculating dependencies  ",
14406                 myparams = create_depgraph_params(myopts, myaction)
14407
14408                 resume_data = mtimedb["resume"]
14409                 mergelist = resume_data["mergelist"]
14410                 if mergelist and "--skipfirst" in myopts:
14411                         for i, task in enumerate(mergelist):
14412                                 if isinstance(task, list) and \
14413                                         task and task[-1] == "merge":
14414                                         del mergelist[i]
14415                                         break
14416
14417                 success = False
14418                 mydepgraph = None
14419                 try:
14420                         success, mydepgraph, dropped_tasks = resume_depgraph(
14421                                 settings, trees, mtimedb, myopts, myparams, spinner)
14422                 except (portage.exception.PackageNotFound,
14423                         depgraph.UnsatisfiedResumeDep), e:
14424                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14425                                 mydepgraph = e.depgraph
14426                         if show_spinner:
14427                                 print
14428                         from textwrap import wrap
14429                         from portage.output import EOutput
14430                         out = EOutput()
14431
14432                         resume_data = mtimedb["resume"]
14433                         mergelist = resume_data.get("mergelist")
14434                         if not isinstance(mergelist, list):
14435                                 mergelist = []
14436                         if mergelist and debug or (verbose and not quiet):
14437                                 out.eerror("Invalid resume list:")
14438                                 out.eerror("")
14439                                 indent = "  "
14440                                 for task in mergelist:
14441                                         if isinstance(task, list):
14442                                                 out.eerror(indent + str(tuple(task)))
14443                                 out.eerror("")
14444
14445                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14446                                 out.eerror("One or more packages are either masked or " + \
14447                                         "have missing dependencies:")
14448                                 out.eerror("")
14449                                 indent = "  "
14450                                 for dep in e.value:
14451                                         if dep.atom is None:
14452                                                 out.eerror(indent + "Masked package:")
14453                                                 out.eerror(2 * indent + str(dep.parent))
14454                                                 out.eerror("")
14455                                         else:
14456                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14457                                                 out.eerror(2 * indent + str(dep.parent))
14458                                                 out.eerror("")
14459                                 msg = "The resume list contains packages " + \
14460                                         "that are either masked or have " + \
14461                                         "unsatisfied dependencies. " + \
14462                                         "Please restart/continue " + \
14463                                         "the operation manually, or use --skipfirst " + \
14464                                         "to skip the first package in the list and " + \
14465                                         "any other packages that may be " + \
14466                                         "masked or have missing dependencies."
14467                                 for line in wrap(msg, 72):
14468                                         out.eerror(line)
14469                         elif isinstance(e, portage.exception.PackageNotFound):
14470                                 out.eerror("An expected package is " + \
14471                                         "not available: %s" % str(e))
14472                                 out.eerror("")
14473                                 msg = "The resume list contains one or more " + \
14474                                         "packages that are no longer " + \
14475                                         "available. Please restart/continue " + \
14476                                         "the operation manually."
14477                                 for line in wrap(msg, 72):
14478                                         out.eerror(line)
14479                 else:
14480                         if show_spinner:
14481                                 print "\b\b... done!"
14482
14483                 if success:
14484                         if dropped_tasks:
14485                                 portage.writemsg("!!! One or more packages have been " + \
14486                                         "dropped due to\n" + \
14487                                         "!!! masking or unsatisfied dependencies:\n\n",
14488                                         noiselevel=-1)
14489                                 for task in dropped_tasks:
14490                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14491                                 portage.writemsg("\n", noiselevel=-1)
14492                         del dropped_tasks
14493                 else:
14494                         if mydepgraph is not None:
14495                                 mydepgraph.display_problems()
14496                         if not (ask or pretend):
14497                                 # delete the current list and also the backup
14498                                 # since it's probably stale too.
14499                                 for k in ("resume", "resume_backup"):
14500                                         mtimedb.pop(k, None)
14501                                 mtimedb.commit()
14502
14503                         return 1
14504         else:
14505                 if ("--resume" in myopts):
14506                         print darkgreen("emerge: It seems we have nothing to resume...")
14507                         return os.EX_OK
14508
14509                 myparams = create_depgraph_params(myopts, myaction)
14510                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14511                         print "Calculating dependencies  ",
14512                         sys.stdout.flush()
14513                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14514                 try:
14515                         retval, favorites = mydepgraph.select_files(myfiles)
14516                 except portage.exception.PackageNotFound, e:
14517                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14518                         return 1
14519                 except portage.exception.PackageSetNotFound, e:
14520                         root_config = trees[settings["ROOT"]]["root_config"]
14521                         display_missing_pkg_set(root_config, e.value)
14522                         return 1
14523                 if show_spinner:
14524                         print "\b\b... done!"
14525                 if not retval:
14526                         mydepgraph.display_problems()
14527                         return 1
14528
14529         if "--pretend" not in myopts and \
14530                 ("--ask" in myopts or "--tree" in myopts or \
14531                 "--verbose" in myopts) and \
14532                 not ("--quiet" in myopts and "--ask" not in myopts):
14533                 if "--resume" in myopts:
14534                         mymergelist = mydepgraph.altlist()
14535                         if len(mymergelist) == 0:
14536                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14537                                 return os.EX_OK
14538                         favorites = mtimedb["resume"]["favorites"]
14539                         retval = mydepgraph.display(
14540                                 mydepgraph.altlist(reversed=tree),
14541                                 favorites=favorites)
14542                         mydepgraph.display_problems()
14543                         if retval != os.EX_OK:
14544                                 return retval
14545                         prompt="Would you like to resume merging these packages?"
14546                 else:
14547                         retval = mydepgraph.display(
14548                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14549                                 favorites=favorites)
14550                         mydepgraph.display_problems()
14551                         if retval != os.EX_OK:
14552                                 return retval
14553                         mergecount=0
14554                         for x in mydepgraph.altlist():
14555                                 if isinstance(x, Package) and x.operation == "merge":
14556                                         mergecount += 1
14557
14558                         if mergecount==0:
14559                                 sets = trees[settings["ROOT"]]["root_config"].sets
14560                                 world_candidates = None
14561                                 if "--noreplace" in myopts and \
14562                                         not oneshot and favorites:
14563                                         # Sets that are not world candidates are filtered
14564                                         # out here since the favorites list needs to be
14565                                         # complete for depgraph.loadResumeCommand() to
14566                                         # operate correctly.
14567                                         world_candidates = [x for x in favorites \
14568                                                 if not (x.startswith(SETPREFIX) and \
14569                                                 not sets[x[1:]].world_candidate)]
14570                                 if "--noreplace" in myopts and \
14571                                         not oneshot and world_candidates:
14572                                         print
14573                                         for x in world_candidates:
14574                                                 print " %s %s" % (good("*"), x)
14575                                         prompt="Would you like to add these packages to your world favorites?"
14576                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14577                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14578                                 else:
14579                                         print
14580                                         print "Nothing to merge; quitting."
14581                                         print
14582                                         return os.EX_OK
14583                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14584                                 prompt="Would you like to fetch the source files for these packages?"
14585                         else:
14586                                 prompt="Would you like to merge these packages?"
14587                 print
14588                 if "--ask" in myopts and userquery(prompt) == "No":
14589                         print
14590                         print "Quitting."
14591                         print
14592                         return os.EX_OK
14593                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14594                 myopts.pop("--ask", None)
14595
14596         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14597                 if ("--resume" in myopts):
14598                         mymergelist = mydepgraph.altlist()
14599                         if len(mymergelist) == 0:
14600                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14601                                 return os.EX_OK
14602                         favorites = mtimedb["resume"]["favorites"]
14603                         retval = mydepgraph.display(
14604                                 mydepgraph.altlist(reversed=tree),
14605                                 favorites=favorites)
14606                         mydepgraph.display_problems()
14607                         if retval != os.EX_OK:
14608                                 return retval
14609                 else:
14610                         retval = mydepgraph.display(
14611                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14612                                 favorites=favorites)
14613                         mydepgraph.display_problems()
14614                         if retval != os.EX_OK:
14615                                 return retval
14616                         if "--buildpkgonly" in myopts:
14617                                 graph_copy = mydepgraph.digraph.clone()
14618                                 removed_nodes = set()
14619                                 for node in graph_copy:
14620                                         if not isinstance(node, Package) or \
14621                                                 node.operation == "nomerge":
14622                                                 removed_nodes.add(node)
14623                                 graph_copy.difference_update(removed_nodes)
14624                                 if not graph_copy.hasallzeros(ignore_priority = \
14625                                         DepPrioritySatisfiedRange.ignore_medium):
14626                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14627                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14628                                         return 1
14629         else:
14630                 if "--buildpkgonly" in myopts:
14631                         graph_copy = mydepgraph.digraph.clone()
14632                         removed_nodes = set()
14633                         for node in graph_copy:
14634                                 if not isinstance(node, Package) or \
14635                                         node.operation == "nomerge":
14636                                         removed_nodes.add(node)
14637                         graph_copy.difference_update(removed_nodes)
14638                         if not graph_copy.hasallzeros(ignore_priority = \
14639                                 DepPrioritySatisfiedRange.ignore_medium):
14640                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14641                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14642                                 return 1
14643
14644                 if ("--resume" in myopts):
14645                         favorites=mtimedb["resume"]["favorites"]
14646                         mymergelist = mydepgraph.altlist()
14647                         mydepgraph.break_refs(mymergelist)
14648                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14649                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14650                         del mydepgraph, mymergelist
14651                         clear_caches(trees)
14652
14653                         retval = mergetask.merge()
14654                         merge_count = mergetask.curval
14655                 else:
14656                         if "resume" in mtimedb and \
14657                         "mergelist" in mtimedb["resume"] and \
14658                         len(mtimedb["resume"]["mergelist"]) > 1:
14659                                 mtimedb["resume_backup"] = mtimedb["resume"]
14660                                 del mtimedb["resume"]
14661                                 mtimedb.commit()
14662                         mtimedb["resume"]={}
14663                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14664                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14665                         # a list type for options.
14666                         mtimedb["resume"]["myopts"] = myopts.copy()
14667
14668                         # Convert Atom instances to plain str.
14669                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14670
14671                         pkglist = mydepgraph.altlist()
14672                         mydepgraph.saveNomergeFavorites()
14673                         mydepgraph.break_refs(pkglist)
14674                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14675                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14676                         del mydepgraph, pkglist
14677                         clear_caches(trees)
14678
14679                         retval = mergetask.merge()
14680                         merge_count = mergetask.curval
14681
14682                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14683                         if "yes" == settings.get("AUTOCLEAN"):
14684                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14685                                 unmerge(trees[settings["ROOT"]]["root_config"],
14686                                         myopts, "clean", [],
14687                                         ldpath_mtimes, autoclean=1)
14688                         else:
14689                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14690                                         + " AUTOCLEAN is disabled.  This can cause serious"
14691                                         + " problems due to overlapping packages.\n")
14692                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14693
14694                 return retval
14695
14696 def multiple_actions(action1, action2):
14697         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14698         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14699         sys.exit(1)
14700
14701 def insert_optional_args(args):
14702         """
14703         Parse optional arguments and insert a value if one has
14704         not been provided. This is done before feeding the args
14705         to the optparse parser since that parser does not support
14706         this feature natively.
14707         """
14708
14709         new_args = []
14710         jobs_opts = ("-j", "--jobs")
14711         arg_stack = args[:]
14712         arg_stack.reverse()
14713         while arg_stack:
14714                 arg = arg_stack.pop()
14715
14716                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14717                 if not (short_job_opt or arg in jobs_opts):
14718                         new_args.append(arg)
14719                         continue
14720
14721                 # Insert an empty placeholder in order to
14722                 # satisfy the requirements of optparse.
14723
14724                 new_args.append("--jobs")
14725                 job_count = None
14726                 saved_opts = None
14727                 if short_job_opt and len(arg) > 2:
14728                         if arg[:2] == "-j":
14729                                 try:
14730                                         job_count = int(arg[2:])
14731                                 except ValueError:
14732                                         saved_opts = arg[2:]
14733                         else:
14734                                 job_count = "True"
14735                                 saved_opts = arg[1:].replace("j", "")
14736
14737                 if job_count is None and arg_stack:
14738                         try:
14739                                 job_count = int(arg_stack[-1])
14740                         except ValueError:
14741                                 pass
14742                         else:
14743                                 # Discard the job count from the stack
14744                                 # since we're consuming it here.
14745                                 arg_stack.pop()
14746
14747                 if job_count is None:
14748                         # unlimited number of jobs
14749                         new_args.append("True")
14750                 else:
14751                         new_args.append(str(job_count))
14752
14753                 if saved_opts is not None:
14754                         new_args.append("-" + saved_opts)
14755
14756         return new_args
14757
14758 def parse_opts(tmpcmdline, silent=False):
14759         myaction=None
14760         myopts = {}
14761         myfiles=[]
14762
14763         global actions, options, shortmapping
14764
14765         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14766         argument_options = {
14767                 "--config-root": {
14768                         "help":"specify the location for portage configuration files",
14769                         "action":"store"
14770                 },
14771                 "--color": {
14772                         "help":"enable or disable color output",
14773                         "type":"choice",
14774                         "choices":("y", "n")
14775                 },
14776
14777                 "--jobs": {
14778
14779                         "help"   : "Specifies the number of packages to build " + \
14780                                 "simultaneously.",
14781
14782                         "action" : "store"
14783                 },
14784
14785                 "--load-average": {
14786
14787                         "help"   :"Specifies that no new builds should be started " + \
14788                                 "if there are other builds running and the load average " + \
14789                                 "is at least LOAD (a floating-point number).",
14790
14791                         "action" : "store"
14792                 },
14793
14794                 "--with-bdeps": {
14795                         "help":"include unnecessary build time dependencies",
14796                         "type":"choice",
14797                         "choices":("y", "n")
14798                 },
14799                 "--reinstall": {
14800                         "help":"specify conditions to trigger package reinstallation",
14801                         "type":"choice",
14802                         "choices":["changed-use"]
14803                 }
14804         }
14805
14806         from optparse import OptionParser
14807         parser = OptionParser()
14808         if parser.has_option("--help"):
14809                 parser.remove_option("--help")
14810
14811         for action_opt in actions:
14812                 parser.add_option("--" + action_opt, action="store_true",
14813                         dest=action_opt.replace("-", "_"), default=False)
14814         for myopt in options:
14815                 parser.add_option(myopt, action="store_true",
14816                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14817         for shortopt, longopt in shortmapping.iteritems():
14818                 parser.add_option("-" + shortopt, action="store_true",
14819                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14820         for myalias, myopt in longopt_aliases.iteritems():
14821                 parser.add_option(myalias, action="store_true",
14822                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14823
14824         for myopt, kwargs in argument_options.iteritems():
14825                 parser.add_option(myopt,
14826                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14827
14828         tmpcmdline = insert_optional_args(tmpcmdline)
14829
14830         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14831
14832         if myoptions.jobs:
14833                 jobs = None
14834                 if myoptions.jobs == "True":
14835                         jobs = True
14836                 else:
14837                         try:
14838                                 jobs = int(myoptions.jobs)
14839                         except ValueError:
14840                                 jobs = -1
14841
14842                 if jobs is not True and \
14843                         jobs < 1:
14844                         jobs = None
14845                         if not silent:
14846                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14847                                         (myoptions.jobs,), noiselevel=-1)
14848
14849                 myoptions.jobs = jobs
14850
14851         if myoptions.load_average:
14852                 try:
14853                         load_average = float(myoptions.load_average)
14854                 except ValueError:
14855                         load_average = 0.0
14856
14857                 if load_average <= 0.0:
14858                         load_average = None
14859                         if not silent:
14860                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14861                                         (myoptions.load_average,), noiselevel=-1)
14862
14863                 myoptions.load_average = load_average
14864
14865         for myopt in options:
14866                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14867                 if v:
14868                         myopts[myopt] = True
14869
14870         for myopt in argument_options:
14871                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14872                 if v is not None:
14873                         myopts[myopt] = v
14874
14875         if myoptions.searchdesc:
14876                 myoptions.search = True
14877
14878         for action_opt in actions:
14879                 v = getattr(myoptions, action_opt.replace("-", "_"))
14880                 if v:
14881                         if myaction:
14882                                 multiple_actions(myaction, action_opt)
14883                                 sys.exit(1)
14884                         myaction = action_opt
14885
14886         myfiles += myargs
14887
14888         return myaction, myopts, myfiles
14889
14890 def validate_ebuild_environment(trees):
14891         for myroot in trees:
14892                 settings = trees[myroot]["vartree"].settings
14893                 settings.validate()
14894
14895 def clear_caches(trees):
14896         for d in trees.itervalues():
14897                 d["porttree"].dbapi.melt()
14898                 d["porttree"].dbapi._aux_cache.clear()
14899                 d["bintree"].dbapi._aux_cache.clear()
14900                 d["bintree"].dbapi._clear_cache()
14901                 d["vartree"].dbapi.linkmap._clear_cache()
14902         portage.dircache.clear()
14903         gc.collect()
14904
14905 def load_emerge_config(trees=None):
14906         kwargs = {}
14907         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14908                 v = os.environ.get(envvar, None)
14909                 if v and v.strip():
14910                         kwargs[k] = v
14911         trees = portage.create_trees(trees=trees, **kwargs)
14912
14913         for root, root_trees in trees.iteritems():
14914                 settings = root_trees["vartree"].settings
14915                 setconfig = load_default_config(settings, root_trees)
14916                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14917
14918         settings = trees["/"]["vartree"].settings
14919
14920         for myroot in trees:
14921                 if myroot != "/":
14922                         settings = trees[myroot]["vartree"].settings
14923                         break
14924
14925         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14926         mtimedb = portage.MtimeDB(mtimedbfile)
14927         
14928         return settings, trees, mtimedb
14929
14930 def adjust_config(myopts, settings):
14931         """Make emerge specific adjustments to the config."""
14932
14933         # To enhance usability, make some vars case insensitive by forcing them to
14934         # lower case.
14935         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14936                 if myvar in settings:
14937                         settings[myvar] = settings[myvar].lower()
14938                         settings.backup_changes(myvar)
14939         del myvar
14940
14941         # Kill noauto as it will break merges otherwise.
14942         if "noauto" in settings.features:
14943                 settings.features.remove('noauto')
14944                 settings['FEATURES'] = ' '.join(sorted(settings.features))
14945                 settings.backup_changes("FEATURES")
14946
14947         CLEAN_DELAY = 5
14948         try:
14949                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14950         except ValueError, e:
14951                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14952                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14953                         settings["CLEAN_DELAY"], noiselevel=-1)
14954         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14955         settings.backup_changes("CLEAN_DELAY")
14956
14957         EMERGE_WARNING_DELAY = 10
14958         try:
14959                 EMERGE_WARNING_DELAY = int(settings.get(
14960                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14961         except ValueError, e:
14962                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14963                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14964                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14965         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14966         settings.backup_changes("EMERGE_WARNING_DELAY")
14967
14968         if "--quiet" in myopts:
14969                 settings["PORTAGE_QUIET"]="1"
14970                 settings.backup_changes("PORTAGE_QUIET")
14971
14972         if "--verbose" in myopts:
14973                 settings["PORTAGE_VERBOSE"] = "1"
14974                 settings.backup_changes("PORTAGE_VERBOSE")
14975
14976         # Set so that configs will be merged regardless of remembered status
14977         if ("--noconfmem" in myopts):
14978                 settings["NOCONFMEM"]="1"
14979                 settings.backup_changes("NOCONFMEM")
14980
14981         # Set various debug markers... They should be merged somehow.
14982         PORTAGE_DEBUG = 0
14983         try:
14984                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14985                 if PORTAGE_DEBUG not in (0, 1):
14986                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14987                                 PORTAGE_DEBUG, noiselevel=-1)
14988                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14989                                 noiselevel=-1)
14990                         PORTAGE_DEBUG = 0
14991         except ValueError, e:
14992                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14993                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14994                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14995                 del e
14996         if "--debug" in myopts:
14997                 PORTAGE_DEBUG = 1
14998         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14999         settings.backup_changes("PORTAGE_DEBUG")
15000
15001         if settings.get("NOCOLOR") not in ("yes","true"):
15002                 portage.output.havecolor = 1
15003
15004         """The explicit --color < y | n > option overrides the NOCOLOR environment
15005         variable and stdout auto-detection."""
15006         if "--color" in myopts:
15007                 if "y" == myopts["--color"]:
15008                         portage.output.havecolor = 1
15009                         settings["NOCOLOR"] = "false"
15010                 else:
15011                         portage.output.havecolor = 0
15012                         settings["NOCOLOR"] = "true"
15013                 settings.backup_changes("NOCOLOR")
15014         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15015                 portage.output.havecolor = 0
15016                 settings["NOCOLOR"] = "true"
15017                 settings.backup_changes("NOCOLOR")
15018
15019 def apply_priorities(settings):
15020         ionice(settings)
15021         nice(settings)
15022
15023 def nice(settings):
15024         try:
15025                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15026         except (OSError, ValueError), e:
15027                 out = portage.output.EOutput()
15028                 out.eerror("Failed to change nice value to '%s'" % \
15029                         settings["PORTAGE_NICENESS"])
15030                 out.eerror("%s\n" % str(e))
15031
15032 def ionice(settings):
15033
15034         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15035         if ionice_cmd:
15036                 ionice_cmd = shlex.split(ionice_cmd)
15037         if not ionice_cmd:
15038                 return
15039
15040         from portage.util import varexpand
15041         variables = {"PID" : str(os.getpid())}
15042         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15043
15044         try:
15045                 rval = portage.process.spawn(cmd, env=os.environ)
15046         except portage.exception.CommandNotFound:
15047                 # The OS kernel probably doesn't support ionice,
15048                 # so return silently.
15049                 return
15050
15051         if rval != os.EX_OK:
15052                 out = portage.output.EOutput()
15053                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15054                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15055
15056 def display_missing_pkg_set(root_config, set_name):
15057
15058         msg = []
15059         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15060                 "The following sets exist:") % \
15061                 colorize("INFORM", set_name))
15062         msg.append("")
15063
15064         for s in sorted(root_config.sets):
15065                 msg.append("    %s" % s)
15066         msg.append("")
15067
15068         writemsg_level("".join("%s\n" % l for l in msg),
15069                 level=logging.ERROR, noiselevel=-1)
15070
15071 def expand_set_arguments(myfiles, myaction, root_config):
15072         retval = os.EX_OK
15073         setconfig = root_config.setconfig
15074
15075         sets = setconfig.getSets()
15076
15077         # In order to know exactly which atoms/sets should be added to the
15078         # world file, the depgraph performs set expansion later. It will get
15079         # confused about where the atoms came from if it's not allowed to
15080         # expand them itself.
15081         do_not_expand = (None, )
15082         newargs = []
15083         for a in myfiles:
15084                 if a in ("system", "world"):
15085                         newargs.append(SETPREFIX+a)
15086                 else:
15087                         newargs.append(a)
15088         myfiles = newargs
15089         del newargs
15090         newargs = []
15091
15092         # separators for set arguments
15093         ARG_START = "{"
15094         ARG_END = "}"
15095
15096         # WARNING: all operators must be of equal length
15097         IS_OPERATOR = "/@"
15098         DIFF_OPERATOR = "-@"
15099         UNION_OPERATOR = "+@"
15100         
15101         for i in range(0, len(myfiles)):
15102                 if myfiles[i].startswith(SETPREFIX):
15103                         start = 0
15104                         end = 0
15105                         x = myfiles[i][len(SETPREFIX):]
15106                         newset = ""
15107                         while x:
15108                                 start = x.find(ARG_START)
15109                                 end = x.find(ARG_END)
15110                                 if start > 0 and start < end:
15111                                         namepart = x[:start]
15112                                         argpart = x[start+1:end]
15113                                 
15114                                         # TODO: implement proper quoting
15115                                         args = argpart.split(",")
15116                                         options = {}
15117                                         for a in args:
15118                                                 if "=" in a:
15119                                                         k, v  = a.split("=", 1)
15120                                                         options[k] = v
15121                                                 else:
15122                                                         options[a] = "True"
15123                                         setconfig.update(namepart, options)
15124                                         newset += (x[:start-len(namepart)]+namepart)
15125                                         x = x[end+len(ARG_END):]
15126                                 else:
15127                                         newset += x
15128                                         x = ""
15129                         myfiles[i] = SETPREFIX+newset
15130                                 
15131         sets = setconfig.getSets()
15132
15133         # display errors that occured while loading the SetConfig instance
15134         for e in setconfig.errors:
15135                 print colorize("BAD", "Error during set creation: %s" % e)
15136         
15137         # emerge relies on the existance of sets with names "world" and "system"
15138         required_sets = ("world", "system")
15139         missing_sets = []
15140
15141         for s in required_sets:
15142                 if s not in sets:
15143                         missing_sets.append(s)
15144         if missing_sets:
15145                 if len(missing_sets) > 2:
15146                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15147                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15148                 elif len(missing_sets) == 2:
15149                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15150                 else:
15151                         missing_sets_str = '"%s"' % missing_sets[-1]
15152                 msg = ["emerge: incomplete set configuration, " + \
15153                         "missing set(s): %s" % missing_sets_str]
15154                 if sets:
15155                         msg.append("        sets defined: %s" % ", ".join(sets))
15156                 msg.append("        This usually means that '%s'" % \
15157                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15158                 msg.append("        is missing or corrupt.")
15159                 for line in msg:
15160                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15161                 return (None, 1)
15162         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15163
15164         for a in myfiles:
15165                 if a.startswith(SETPREFIX):
15166                         # support simple set operations (intersection, difference and union)
15167                         # on the commandline. Expressions are evaluated strictly left-to-right
15168                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15169                                 expression = a[len(SETPREFIX):]
15170                                 expr_sets = []
15171                                 expr_ops = []
15172                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15173                                         is_pos = expression.rfind(IS_OPERATOR)
15174                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15175                                         union_pos = expression.rfind(UNION_OPERATOR)
15176                                         op_pos = max(is_pos, diff_pos, union_pos)
15177                                         s1 = expression[:op_pos]
15178                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15179                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15180                                         if not s2 in sets:
15181                                                 display_missing_pkg_set(root_config, s2)
15182                                                 return (None, 1)
15183                                         expr_sets.insert(0, s2)
15184                                         expr_ops.insert(0, op)
15185                                         expression = s1
15186                                 if not expression in sets:
15187                                         display_missing_pkg_set(root_config, expression)
15188                                         return (None, 1)
15189                                 expr_sets.insert(0, expression)
15190                                 result = set(setconfig.getSetAtoms(expression))
15191                                 for i in range(0, len(expr_ops)):
15192                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15193                                         if expr_ops[i] == IS_OPERATOR:
15194                                                 result.intersection_update(s2)
15195                                         elif expr_ops[i] == DIFF_OPERATOR:
15196                                                 result.difference_update(s2)
15197                                         elif expr_ops[i] == UNION_OPERATOR:
15198                                                 result.update(s2)
15199                                         else:
15200                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15201                                 newargs.extend(result)
15202                         else:                   
15203                                 s = a[len(SETPREFIX):]
15204                                 if s not in sets:
15205                                         display_missing_pkg_set(root_config, s)
15206                                         return (None, 1)
15207                                 setconfig.active.append(s)
15208                                 try:
15209                                         set_atoms = setconfig.getSetAtoms(s)
15210                                 except portage.exception.PackageSetNotFound, e:
15211                                         writemsg_level(("emerge: the given set '%s' " + \
15212                                                 "contains a non-existent set named '%s'.\n") % \
15213                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15214                                         return (None, 1)
15215                                 if myaction in unmerge_actions and \
15216                                                 not sets[s].supportsOperation("unmerge"):
15217                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15218                                                 "not support unmerge operations\n")
15219                                         retval = 1
15220                                 elif not set_atoms:
15221                                         print "emerge: '%s' is an empty set" % s
15222                                 elif myaction not in do_not_expand:
15223                                         newargs.extend(set_atoms)
15224                                 else:
15225                                         newargs.append(SETPREFIX+s)
15226                                 for e in sets[s].errors:
15227                                         print e
15228                 else:
15229                         newargs.append(a)
15230         return (newargs, retval)
15231
15232 def repo_name_check(trees):
15233         missing_repo_names = set()
15234         for root, root_trees in trees.iteritems():
15235                 if "porttree" in root_trees:
15236                         portdb = root_trees["porttree"].dbapi
15237                         missing_repo_names.update(portdb.porttrees)
15238                         repos = portdb.getRepositories()
15239                         for r in repos:
15240                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15241                         if portdb.porttree_root in missing_repo_names and \
15242                                 not os.path.exists(os.path.join(
15243                                 portdb.porttree_root, "profiles")):
15244                                 # This is normal if $PORTDIR happens to be empty,
15245                                 # so don't warn about it.
15246                                 missing_repo_names.remove(portdb.porttree_root)
15247
15248         if missing_repo_names:
15249                 msg = []
15250                 msg.append("WARNING: One or more repositories " + \
15251                         "have missing repo_name entries:")
15252                 msg.append("")
15253                 for p in missing_repo_names:
15254                         msg.append("\t%s/profiles/repo_name" % (p,))
15255                 msg.append("")
15256                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15257                         "should be a plain text file containing a unique " + \
15258                         "name for the repository on the first line.", 70))
15259                 writemsg_level("".join("%s\n" % l for l in msg),
15260                         level=logging.WARNING, noiselevel=-1)
15261
15262         return bool(missing_repo_names)
15263
15264 def config_protect_check(trees):
15265         for root, root_trees in trees.iteritems():
15266                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15267                         msg = "!!! CONFIG_PROTECT is empty"
15268                         if root != "/":
15269                                 msg += " for '%s'" % root
15270                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15271
15272 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15273
15274         if "--quiet" in myopts:
15275                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15276                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15277                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15278                         print "    " + colorize("INFORM", cp)
15279                 return
15280
15281         s = search(root_config, spinner, "--searchdesc" in myopts,
15282                 "--quiet" not in myopts, "--usepkg" in myopts,
15283                 "--usepkgonly" in myopts)
15284         null_cp = portage.dep_getkey(insert_category_into_atom(
15285                 arg, "null"))
15286         cat, atom_pn = portage.catsplit(null_cp)
15287         s.searchkey = atom_pn
15288         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15289                 s.addCP(cp)
15290         s.output()
15291         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15292         print "!!! one of the above fully-qualified ebuild names instead.\n"
15293
15294 def profile_check(trees, myaction, myopts):
15295         if myaction in ("info", "sync"):
15296                 return os.EX_OK
15297         elif "--version" in myopts or "--help" in myopts:
15298                 return os.EX_OK
15299         for root, root_trees in trees.iteritems():
15300                 if root_trees["root_config"].settings.profiles:
15301                         continue
15302                 # generate some profile related warning messages
15303                 validate_ebuild_environment(trees)
15304                 msg = "If you have just changed your profile configuration, you " + \
15305                         "should revert back to the previous configuration. Due to " + \
15306                         "your current profile being invalid, allowed actions are " + \
15307                         "limited to --help, --info, --sync, and --version."
15308                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15309                         level=logging.ERROR, noiselevel=-1)
15310                 return 1
15311         return os.EX_OK
15312
15313 def emerge_main():
15314         global portage  # NFC why this is necessary now - genone
15315         portage._disable_legacy_globals()
15316         # Disable color until we're sure that it should be enabled (after
15317         # EMERGE_DEFAULT_OPTS has been parsed).
15318         portage.output.havecolor = 0
15319         # This first pass is just for options that need to be known as early as
15320         # possible, such as --config-root.  They will be parsed again later,
15321         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15322         # the value of --config-root).
15323         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15324         if "--debug" in myopts:
15325                 os.environ["PORTAGE_DEBUG"] = "1"
15326         if "--config-root" in myopts:
15327                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15328
15329         # Portage needs to ensure a sane umask for the files it creates.
15330         os.umask(022)
15331         settings, trees, mtimedb = load_emerge_config()
15332         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15333         rval = profile_check(trees, myaction, myopts)
15334         if rval != os.EX_OK:
15335                 return rval
15336
15337         if portage._global_updates(trees, mtimedb["updates"]):
15338                 mtimedb.commit()
15339                 # Reload the whole config from scratch.
15340                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15341                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15342
15343         xterm_titles = "notitles" not in settings.features
15344
15345         tmpcmdline = []
15346         if "--ignore-default-opts" not in myopts:
15347                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15348         tmpcmdline.extend(sys.argv[1:])
15349         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15350
15351         if "--digest" in myopts:
15352                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15353                 # Reload the whole config from scratch so that the portdbapi internal
15354                 # config is updated with new FEATURES.
15355                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15356                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15357
15358         for myroot in trees:
15359                 mysettings =  trees[myroot]["vartree"].settings
15360                 mysettings.unlock()
15361                 adjust_config(myopts, mysettings)
15362                 if '--pretend' not in myopts and myaction in \
15363                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15364                         mysettings["PORTAGE_COUNTER_HASH"] = \
15365                                 trees[myroot]["vartree"].dbapi._counter_hash()
15366                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15367                 mysettings.lock()
15368                 del myroot, mysettings
15369
15370         apply_priorities(settings)
15371
15372         spinner = stdout_spinner()
15373         if "candy" in settings.features:
15374                 spinner.update = spinner.update_scroll
15375
15376         if "--quiet" not in myopts:
15377                 portage.deprecated_profile_check(settings=settings)
15378                 repo_name_check(trees)
15379                 config_protect_check(trees)
15380
15381         eclasses_overridden = {}
15382         for mytrees in trees.itervalues():
15383                 mydb = mytrees["porttree"].dbapi
15384                 # Freeze the portdbapi for performance (memoize all xmatch results).
15385                 mydb.freeze()
15386                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15387         del mytrees, mydb
15388
15389         if eclasses_overridden and \
15390                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15391                 prefix = bad(" * ")
15392                 if len(eclasses_overridden) == 1:
15393                         writemsg(prefix + "Overlay eclass overrides " + \
15394                                 "eclass from PORTDIR:\n", noiselevel=-1)
15395                 else:
15396                         writemsg(prefix + "Overlay eclasses override " + \
15397                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15398                 writemsg(prefix + "\n", noiselevel=-1)
15399                 for eclass_name in sorted(eclasses_overridden):
15400                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15401                                 (eclasses_overridden[eclass_name], eclass_name),
15402                                 noiselevel=-1)
15403                 writemsg(prefix + "\n", noiselevel=-1)
15404                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15405                 "because it will trigger invalidation of cached ebuild metadata " + \
15406                 "that is distributed with the portage tree. If you must " + \
15407                 "override eclasses from PORTDIR then you are advised to add " + \
15408                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15409                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15410                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15411                 "you would like to disable this warning."
15412                 from textwrap import wrap
15413                 for line in wrap(msg, 72):
15414                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15415
15416         if "moo" in myfiles:
15417                 print """
15418
15419   Larry loves Gentoo (""" + platform.system() + """)
15420
15421  _______________________
15422 < Have you mooed today? >
15423  -----------------------
15424         \   ^__^
15425          \  (oo)\_______
15426             (__)\       )\/\ 
15427                 ||----w |
15428                 ||     ||
15429
15430 """
15431
15432         for x in myfiles:
15433                 ext = os.path.splitext(x)[1]
15434                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15435                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15436                         break
15437
15438         root_config = trees[settings["ROOT"]]["root_config"]
15439         if myaction == "list-sets":
15440                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15441                 sys.stdout.flush()
15442                 return os.EX_OK
15443
15444         # only expand sets for actions taking package arguments
15445         oldargs = myfiles[:]
15446         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15447                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15448                 if retval != os.EX_OK:
15449                         return retval
15450
15451                 # Need to handle empty sets specially, otherwise emerge will react 
15452                 # with the help message for empty argument lists
15453                 if oldargs and not myfiles:
15454                         print "emerge: no targets left after set expansion"
15455                         return 0
15456
15457         if ("--tree" in myopts) and ("--columns" in myopts):
15458                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15459                 return 1
15460
15461         if ("--quiet" in myopts):
15462                 spinner.update = spinner.update_quiet
15463                 portage.util.noiselimit = -1
15464
15465         # Always create packages if FEATURES=buildpkg
15466         # Imply --buildpkg if --buildpkgonly
15467         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15468                 if "--buildpkg" not in myopts:
15469                         myopts["--buildpkg"] = True
15470
15471         # Always try and fetch binary packages if FEATURES=getbinpkg
15472         if ("getbinpkg" in settings.features):
15473                 myopts["--getbinpkg"] = True
15474
15475         if "--buildpkgonly" in myopts:
15476                 # --buildpkgonly will not merge anything, so
15477                 # it cancels all binary package options.
15478                 for opt in ("--getbinpkg", "--getbinpkgonly",
15479                         "--usepkg", "--usepkgonly"):
15480                         myopts.pop(opt, None)
15481
15482         if "--fetch-all-uri" in myopts:
15483                 myopts["--fetchonly"] = True
15484
15485         if "--skipfirst" in myopts and "--resume" not in myopts:
15486                 myopts["--resume"] = True
15487
15488         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15489                 myopts["--usepkgonly"] = True
15490
15491         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15492                 myopts["--getbinpkg"] = True
15493
15494         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15495                 myopts["--usepkg"] = True
15496
15497         # Also allow -K to apply --usepkg/-k
15498         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15499                 myopts["--usepkg"] = True
15500
15501         # Allow -p to remove --ask
15502         if ("--pretend" in myopts) and ("--ask" in myopts):
15503                 print ">>> --pretend disables --ask... removing --ask from options."
15504                 del myopts["--ask"]
15505
15506         # forbid --ask when not in a terminal
15507         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15508         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15509                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15510                         noiselevel=-1)
15511                 return 1
15512
15513         if settings.get("PORTAGE_DEBUG", "") == "1":
15514                 spinner.update = spinner.update_quiet
15515                 portage.debug=1
15516                 if "python-trace" in settings.features:
15517                         import portage.debug
15518                         portage.debug.set_trace(True)
15519
15520         if not ("--quiet" in myopts):
15521                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15522                         spinner.update = spinner.update_basic
15523
15524         if myaction == 'version':
15525                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15526                         settings.profile_path, settings["CHOST"],
15527                         trees[settings["ROOT"]]["vartree"].dbapi)
15528                 return 0
15529         elif "--help" in myopts:
15530                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15531                 return 0
15532
15533         if "--debug" in myopts:
15534                 print "myaction", myaction
15535                 print "myopts", myopts
15536
15537         if not myaction and not myfiles and "--resume" not in myopts:
15538                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15539                 return 1
15540
15541         pretend = "--pretend" in myopts
15542         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15543         buildpkgonly = "--buildpkgonly" in myopts
15544
15545         # check if root user is the current user for the actions where emerge needs this
15546         if portage.secpass < 2:
15547                 # We've already allowed "--version" and "--help" above.
15548                 if "--pretend" not in myopts and myaction not in ("search","info"):
15549                         need_superuser = not \
15550                                 (fetchonly or \
15551                                 (buildpkgonly and secpass >= 1) or \
15552                                 myaction in ("metadata", "regen") or \
15553                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15554                         if portage.secpass < 1 or \
15555                                 need_superuser:
15556                                 if need_superuser:
15557                                         access_desc = "superuser"
15558                                 else:
15559                                         access_desc = "portage group"
15560                                 # Always show portage_group_warning() when only portage group
15561                                 # access is required but the user is not in the portage group.
15562                                 from portage.data import portage_group_warning
15563                                 if "--ask" in myopts:
15564                                         myopts["--pretend"] = True
15565                                         del myopts["--ask"]
15566                                         print ("%s access is required... " + \
15567                                                 "adding --pretend to options.\n") % access_desc
15568                                         if portage.secpass < 1 and not need_superuser:
15569                                                 portage_group_warning()
15570                                 else:
15571                                         sys.stderr.write(("emerge: %s access is " + \
15572                                                 "required.\n\n") % access_desc)
15573                                         if portage.secpass < 1 and not need_superuser:
15574                                                 portage_group_warning()
15575                                         return 1
15576
15577         disable_emergelog = False
15578         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15579                 if x in myopts:
15580                         disable_emergelog = True
15581                         break
15582         if myaction in ("search", "info"):
15583                 disable_emergelog = True
15584         if disable_emergelog:
15585                 """ Disable emergelog for everything except build or unmerge
15586                 operations.  This helps minimize parallel emerge.log entries that can
15587                 confuse log parsers.  We especially want it disabled during
15588                 parallel-fetch, which uses --resume --fetchonly."""
15589                 global emergelog
15590                 def emergelog(*pargs, **kargs):
15591                         pass
15592
15593         if not "--pretend" in myopts:
15594                 emergelog(xterm_titles, "Started emerge on: "+\
15595                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15596                 myelogstr=""
15597                 if myopts:
15598                         myelogstr=" ".join(myopts)
15599                 if myaction:
15600                         myelogstr+=" "+myaction
15601                 if myfiles:
15602                         myelogstr += " " + " ".join(oldargs)
15603                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15604         del oldargs
15605
15606         def emergeexitsig(signum, frame):
15607                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15608                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15609                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15610                 sys.exit(100+signum)
15611         signal.signal(signal.SIGINT, emergeexitsig)
15612         signal.signal(signal.SIGTERM, emergeexitsig)
15613
15614         def emergeexit():
15615                 """This gets out final log message in before we quit."""
15616                 if "--pretend" not in myopts:
15617                         emergelog(xterm_titles, " *** terminating.")
15618                 if "notitles" not in settings.features:
15619                         xtermTitleReset()
15620         portage.atexit_register(emergeexit)
15621
15622         if myaction in ("config", "metadata", "regen", "sync"):
15623                 if "--pretend" in myopts:
15624                         sys.stderr.write(("emerge: The '%s' action does " + \
15625                                 "not support '--pretend'.\n") % myaction)
15626                         return 1
15627
15628         if "sync" == myaction:
15629                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15630         elif "metadata" == myaction:
15631                 action_metadata(settings, portdb, myopts)
15632         elif myaction=="regen":
15633                 validate_ebuild_environment(trees)
15634                 return action_regen(settings, portdb, myopts.get("--jobs"),
15635                         myopts.get("--load-average"))
15636         # HELP action
15637         elif "config"==myaction:
15638                 validate_ebuild_environment(trees)
15639                 action_config(settings, trees, myopts, myfiles)
15640
15641         # SEARCH action
15642         elif "search"==myaction:
15643                 validate_ebuild_environment(trees)
15644                 action_search(trees[settings["ROOT"]]["root_config"],
15645                         myopts, myfiles, spinner)
15646         elif myaction in ("clean", "unmerge") or \
15647                 (myaction == "prune" and "--nodeps" in myopts):
15648                 validate_ebuild_environment(trees)
15649
15650                 # Ensure atoms are valid before calling unmerge().
15651                 # For backward compat, leading '=' is not required.
15652                 for x in myfiles:
15653                         if is_valid_package_atom(x) or \
15654                                 is_valid_package_atom("=" + x):
15655                                 continue
15656                         msg = []
15657                         msg.append("'%s' is not a valid package atom." % (x,))
15658                         msg.append("Please check ebuild(5) for full details.")
15659                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15660                                 level=logging.ERROR, noiselevel=-1)
15661                         return 1
15662
15663                 # When given a list of atoms, unmerge
15664                 # them in the order given.
15665                 ordered = myaction == "unmerge"
15666                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15667                         mtimedb["ldpath"], ordered=ordered):
15668                         if not (buildpkgonly or fetchonly or pretend):
15669                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15670
15671         elif myaction in ("depclean", "info", "prune"):
15672
15673                 # Ensure atoms are valid before calling unmerge().
15674                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15675                 valid_atoms = []
15676                 for x in myfiles:
15677                         if is_valid_package_atom(x):
15678                                 try:
15679                                         valid_atoms.append(
15680                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15681                                 except portage.exception.AmbiguousPackageName, e:
15682                                         msg = "The short ebuild name \"" + x + \
15683                                                 "\" is ambiguous.  Please specify " + \
15684                                                 "one of the following " + \
15685                                                 "fully-qualified ebuild names instead:"
15686                                         for line in textwrap.wrap(msg, 70):
15687                                                 writemsg_level("!!! %s\n" % (line,),
15688                                                         level=logging.ERROR, noiselevel=-1)
15689                                         for i in e[0]:
15690                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15691                                                         level=logging.ERROR, noiselevel=-1)
15692                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15693                                         return 1
15694                                 continue
15695                         msg = []
15696                         msg.append("'%s' is not a valid package atom." % (x,))
15697                         msg.append("Please check ebuild(5) for full details.")
15698                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15699                                 level=logging.ERROR, noiselevel=-1)
15700                         return 1
15701
15702                 if myaction == "info":
15703                         return action_info(settings, trees, myopts, valid_atoms)
15704
15705                 validate_ebuild_environment(trees)
15706                 action_depclean(settings, trees, mtimedb["ldpath"],
15707                         myopts, myaction, valid_atoms, spinner)
15708                 if not (buildpkgonly or fetchonly or pretend):
15709                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15710         # "update", "system", or just process files:
15711         else:
15712                 validate_ebuild_environment(trees)
15713
15714                 for x in myfiles:
15715                         if x.startswith(SETPREFIX) or \
15716                                 is_valid_package_atom(x):
15717                                 continue
15718                         if x[:1] == os.sep:
15719                                 continue
15720                         try:
15721                                 os.lstat(x)
15722                                 continue
15723                         except OSError:
15724                                 pass
15725                         msg = []
15726                         msg.append("'%s' is not a valid package atom." % (x,))
15727                         msg.append("Please check ebuild(5) for full details.")
15728                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15729                                 level=logging.ERROR, noiselevel=-1)
15730                         return 1
15731
15732                 if "--pretend" not in myopts:
15733                         display_news_notification(root_config, myopts)
15734                 retval = action_build(settings, trees, mtimedb,
15735                         myopts, myaction, myfiles, spinner)
15736                 root_config = trees[settings["ROOT"]]["root_config"]
15737                 post_emerge(root_config, myopts, mtimedb, retval)
15738
15739                 return retval