1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing output to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
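# Illustrative stdout_spinner usage (hypothetical caller; "portdb" below is a
# placeholder for any long iteration).  Pick one of the update_* methods and
# call update() inside the loop; _return_early() throttles tty writes to at
# most one per min_display_latency seconds.
#
#     spinner = stdout_spinner()
#     spinner.update = spinner.update_scroll  # or update_basic / update_twirl / update_quiet
#     for cp in portdb.cp_all():
#         spinner.update()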
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for user input,
148         which is checked against the responses; the first response to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
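# Illustrative userquery() calls (hypothetical prompts).  With no optional
# arguments the responses are ["Yes", "No"]; an empty input matches the first
# entry, and any case-insensitive prefix ("n", "NO") selects its response.
#
#     if userquery("Continue with the merge?") == "No":
#         sys.exit(1)
#     choice = userquery("Config file changed. What now?",
#         responses=["Keep", "Replace", "Diff"])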
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--rdeps-only",   "--root-deps",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
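# Illustrative emergelog() call (hypothetical message).  A timestamped line is
# appended to /var/log/emerge.log under a portage lock, and short_msg is pushed
# into the terminal title when xterm_titles is enabled:
#
#     emergelog(xterm_titles, " *** emerge --pretend sys-apps/portage",
#         short_msg="emerge: sys-apps/portage")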
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
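# Illustrative format_size() results: sizes are rounded up to the next whole
# kB (so a pending 1-byte fetch never shows as "0 kB") and digits are grouped
# with commas.
#
#     format_size(1)        -> "1 kB"
#     format_size(2048)     -> "2 kB"
#     format_size(10000000) -> "9,766 kB"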
295
296 def getgccversion(chost):
297         """
298         @rtype: C{str}
299         @return: the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
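# Illustrative getgccversion() results (hypothetical toolchain).  The active
# compiler is detected via "gcc-config -c" first, then ${CHOST}-gcc, then a
# bare gcc:
#
#     getgccversion("x86_64-pc-linux-gnu") -> "gcc-4.3.2"
#     getgccversion(chost)                 -> "[unavailable]"  # no gcc found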
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
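# Illustrative getportageversion() output (hypothetical system state), built
# from the profile path, gcc version, installed virtual/libc providers and
# kernel/arch:
#
#     getportageversion(portdir, target_root, profile, chost, vardb)
#         -> "Portage 2.1.6 (default/linux/amd64/2008.0, gcc-4.3.2, glibc-2.8-r1, 2.6.27-gentoo x86_64)"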
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of whether it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
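# Illustrative create_depgraph_params() results (hypothetical option dicts in
# the form emerge's option parser produces):
#
#     create_depgraph_params({"--update": True, "--deep": True}, None)
#         -> set(["recurse", "selective", "deep"])
#     create_depgraph_params({}, "remove")
#         -> set(["recurse", "remove", "complete"])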
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virtual matches are unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtual
497                 expansion can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
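# Illustrative search workflow (hypothetical root_config and spinner objects,
# normally prepared by the emerge frontend before a --search action):
#
#     s = search(root_config, spinner, searchdesc=True, verbose=True,
#         usepkg=False, usepkgonly=False)
#     s.execute("vim")        # "%regex" and "@category/name" prefixes are honored
#     s.output()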
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 self.sets = self.setconfig.getSets()
775                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776
777 def create_world_atom(pkg, args_set, root_config):
778         """Create a new atom for the world file if one does not exist.  If the
779         argument atom is precise enough to identify a specific slot then a slot
780         atom will be returned. Atoms that are in the system set may also be stored
781         in world since system atoms can only match one slot while world atoms can
782         be greedy with respect to slots.  Unslotted system packages will not be
783         stored in world."""
784
785         arg_atom = args_set.findAtomForPackage(pkg)
786         if not arg_atom:
787                 return None
788         cp = portage.dep_getkey(arg_atom)
789         new_world_atom = cp
790         sets = root_config.sets
791         portdb = root_config.trees["porttree"].dbapi
792         vardb = root_config.trees["vartree"].dbapi
793         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
794                 for cpv in portdb.match(cp))
795         slotted = len(available_slots) > 1 or \
796                 (len(available_slots) == 1 and "0" not in available_slots)
797         if not slotted:
798                 # check the vdb in case this is multislot
799                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
800                         for cpv in vardb.match(cp))
801                 slotted = len(available_slots) > 1 or \
802                         (len(available_slots) == 1 and "0" not in available_slots)
803         if slotted and arg_atom != cp:
804                 # If the user gave a specific atom, store it as a
805                 # slot atom in the world file.
806                 slot_atom = pkg.slot_atom
807
808                 # For USE=multislot, there are a couple of cases to
809                 # handle here:
810                 #
811                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
812                 #    unknown value, so just record an unslotted atom.
813                 #
814                 # 2) SLOT comes from an installed package and there is no
815                 #    matching SLOT in the portage tree.
816                 #
817                 # Make sure that the slot atom is available in either the
818                 # portdb or the vardb, since otherwise the user certainly
819                 # doesn't want the SLOT atom recorded in the world file
820                 # (case 1 above).  If it's only available in the vardb,
821                 # the user may be trying to prevent a USE=multislot
822                 # package from being removed by --depclean (case 2 above).
823
824                 mydb = portdb
825                 if not portdb.match(slot_atom):
826                         # SLOT seems to come from an installed multislot package
827                         mydb = vardb
828                 # If there is no installed package matching the SLOT atom,
829                 # it probably changed SLOT spontaneously due to USE=multislot,
830                 # so just record an unslotted atom.
831                 if vardb.match(slot_atom):
832                         # Now verify that the argument is precise
833                         # enough to identify a specific slot.
834                         matches = mydb.match(arg_atom)
835                         matched_slots = set()
836                         for cpv in matches:
837                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
838                         if len(matched_slots) == 1:
839                                 new_world_atom = slot_atom
840
841         if new_world_atom == sets["world"].findAtomForPackage(pkg):
842                 # Both atoms would be identical, so there's nothing to add.
843                 return None
844         if not slotted:
845                 # Unlike world atoms, system atoms are not greedy for slots, so they
846                 # can't be safely excluded from world if they are slotted.
847                 system_atom = sets["system"].findAtomForPackage(pkg)
848                 if system_atom:
849                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
850                                 return None
851                         # System virtuals aren't safe to exclude from world since they can
852                         # match multiple old-style virtuals but only one of them will be
853                         # pulled in by update or depclean.
854                         providers = portdb.mysettings.getvirtuals().get(
855                                 portage.dep_getkey(system_atom))
856                         if providers and len(providers) == 1 and providers[0] == cp:
857                                 return None
858         return new_world_atom
859
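# Illustrative create_world_atom() outcomes (hypothetical packages):
#
#     argument "=sys-devel/gcc-4.3.2" for a multi-slot package  -> "sys-devel/gcc:4.3"
#     argument "app-editors/vim" for a single-slot package      -> "app-editors/vim"
#     package already covered by an identical world atom        -> None
#     unslotted, non-virtual member of the system set           -> None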
860 def filter_iuse_defaults(iuse):
861         for flag in iuse:
862                 if flag.startswith("+") or flag.startswith("-"):
863                         yield flag[1:]
864                 else:
865                         yield flag
866
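# Illustrative filter_iuse_defaults() result: IUSE default markers ("+"/"-")
# are stripped while plain flags pass through unchanged.
#
#     list(filter_iuse_defaults(["+berkdb", "-gtk", "ssl"])) -> ["berkdb", "gtk", "ssl"]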
867 class SlotObject(object):
868         __slots__ = ("__weakref__",)
869
870         def __init__(self, **kwargs):
871                 classes = [self.__class__]
872                 while classes:
873                         c = classes.pop()
874                         if c is SlotObject:
875                                 continue
876                         classes.extend(c.__bases__)
877                         slots = getattr(c, "__slots__", None)
878                         if not slots:
879                                 continue
880                         for myattr in slots:
881                                 myvalue = kwargs.get(myattr, None)
882                                 setattr(self, myattr, myvalue)
883
884         def copy(self):
885                 """
886                 Create a new instance and copy all attributes
887                 defined from __slots__ (including those from
888                 inherited classes).
889                 """
890                 obj = self.__class__()
891
892                 classes = [self.__class__]
893                 while classes:
894                         c = classes.pop()
895                         if c is SlotObject:
896                                 continue
897                         classes.extend(c.__bases__)
898                         slots = getattr(c, "__slots__", None)
899                         if not slots:
900                                 continue
901                         for myattr in slots:
902                                 setattr(obj, myattr, getattr(self, myattr))
903
904                 return obj
905
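# Illustrative SlotObject subclass (ExampleTask is hypothetical): every name
# listed in __slots__, including slots inherited from parent classes, becomes
# a keyword argument that __init__() stores on the instance, and copy()
# duplicates all of them.
#
#     class ExampleTask(SlotObject):
#         __slots__ = ("cpv", "pkg_path")
#
#     task = ExampleTask(cpv="app-misc/foo-1.0", pkg_path="/tmp/foo-1.0.tbz2")
#     clone = task.copy()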
906 class AbstractDepPriority(SlotObject):
907         __slots__ = ("buildtime", "runtime", "runtime_post")
908
909         def __lt__(self, other):
910                 return self.__int__() < other
911
912         def __le__(self, other):
913                 return self.__int__() <= other
914
915         def __eq__(self, other):
916                 return self.__int__() == other
917
918         def __ne__(self, other):
919                 return self.__int__() != other
920
921         def __gt__(self, other):
922                 return self.__int__() > other
923
924         def __ge__(self, other):
925                 return self.__int__() >= other
926
927         def copy(self):
928                 import copy
929                 return copy.copy(self)
930
931 class DepPriority(AbstractDepPriority):
932
933         __slots__ = ("satisfied", "optional", "rebuild")
934
935         def __int__(self):
936                 return 0
937
938         def __str__(self):
939                 if self.optional:
940                         return "optional"
941                 if self.buildtime:
942                         return "buildtime"
943                 if self.runtime:
944                         return "runtime"
945                 if self.runtime_post:
946                         return "runtime_post"
947                 return "soft"
948
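# Illustrative DepPriority values: __int__() is always 0, while __str__()
# reports the strongest property that is set.
#
#     str(DepPriority(buildtime=True))    -> "buildtime"
#     str(DepPriority(runtime_post=True)) -> "runtime_post"
#     str(DepPriority())                  -> "soft"
#     DepPriority(runtime=True) < 1       -> True   (compared via __int__)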
949 class BlockerDepPriority(DepPriority):
950         __slots__ = ()
951         def __int__(self):
952                 return 0
953
954         def __str__(self):
955                 return 'blocker'
956
957 BlockerDepPriority.instance = BlockerDepPriority()
958
959 class UnmergeDepPriority(AbstractDepPriority):
960         __slots__ = ("optional", "satisfied",)
961         """
962         Combination of properties           Priority  Category
963
964         runtime                                0       HARD
965         runtime_post                          -1       HARD
966         buildtime                             -2       SOFT
967         (none of the above)                   -2       SOFT
968         """
969
970         MAX    =  0
971         SOFT   = -2
972         MIN    = -2
973
974         def __int__(self):
975                 if self.runtime:
976                         return 0
977                 if self.runtime_post:
978                         return -1
979                 if self.buildtime:
980                         return -2
981                 return -2
982
983         def __str__(self):
984                 myvalue = self.__int__()
985                 if myvalue > self.SOFT:
986                         return "hard"
987                 return "soft"
988
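# Illustrative UnmergeDepPriority values, matching the table above:
#
#     int(UnmergeDepPriority(runtime=True))      ->  0  ("hard")
#     int(UnmergeDepPriority(runtime_post=True)) -> -1  ("hard")
#     int(UnmergeDepPriority(buildtime=True))    -> -2  ("soft")
#     int(UnmergeDepPriority())                  -> -2  ("soft")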
989 class DepPriorityNormalRange(object):
990         """
991         DepPriority properties              Index      Category
992
993         buildtime                                      HARD
994         runtime                                3       MEDIUM
995         runtime_post                           2       MEDIUM_SOFT
996         optional                               1       SOFT
997         (none of the above)                    0       NONE
998         """
999         MEDIUM      = 3
1000         MEDIUM_SOFT = 2
1001         SOFT        = 1
1002         NONE        = 0
1003
1004         @classmethod
1005         def _ignore_optional(cls, priority):
1006                 if priority.__class__ is not DepPriority:
1007                         return False
1008                 return bool(priority.optional)
1009
1010         @classmethod
1011         def _ignore_runtime_post(cls, priority):
1012                 if priority.__class__ is not DepPriority:
1013                         return False
1014                 return bool(priority.optional or priority.runtime_post)
1015
1016         @classmethod
1017         def _ignore_runtime(cls, priority):
1018                 if priority.__class__ is not DepPriority:
1019                         return False
1020                 return not priority.buildtime
1021
1022         ignore_medium      = _ignore_runtime
1023         ignore_medium_soft = _ignore_runtime_post
1024         ignore_soft        = _ignore_optional
1025
1026 DepPriorityNormalRange.ignore_priority = (
1027         None,
1028         DepPriorityNormalRange._ignore_optional,
1029         DepPriorityNormalRange._ignore_runtime_post,
1030         DepPriorityNormalRange._ignore_runtime
1031 )
1032
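# Illustrative use of the ignore_priority table: index 0 ignores nothing and
# higher indices ignore progressively harder DepPriority edges, so a caller
# can relax dependency edges step by step (e.g. while breaking cycles):
#
#     relax = DepPriorityNormalRange.ignore_priority[DepPriorityNormalRange.SOFT]
#     relax(DepPriority(optional=True)) -> True    # soft edge, may be ignored
#     relax(DepPriority(runtime=True))  -> False   # medium edge, kept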
1033 class DepPrioritySatisfiedRange(object):
1034         """
1035         DepPriority                         Index      Category
1036
1037         not satisfied and buildtime                    HARD
1038         not satisfied and runtime              7       MEDIUM
1039         not satisfied and runtime_post         6       MEDIUM_SOFT
1040         satisfied and buildtime and rebuild    5       SOFT
1041         satisfied and buildtime                4       SOFT
1042         satisfied and runtime                  3       SOFT
1043         satisfied and runtime_post             2       SOFT
1044         optional                               1       SOFT
1045         (none of the above)                    0       NONE
1046         """
1047         MEDIUM      = 7
1048         MEDIUM_SOFT = 6
1049         SOFT        = 5
1050         NONE        = 0
1051
1052         @classmethod
1053         def _ignore_optional(cls, priority):
1054                 if priority.__class__ is not DepPriority:
1055                         return False
1056                 return bool(priority.optional)
1057
1058         @classmethod
1059         def _ignore_satisfied_runtime_post(cls, priority):
1060                 if priority.__class__ is not DepPriority:
1061                         return False
1062                 if priority.optional:
1063                         return True
1064                 if not priority.satisfied:
1065                         return False
1066                 return bool(priority.runtime_post)
1067
1068         @classmethod
1069         def _ignore_satisfied_runtime(cls, priority):
1070                 if priority.__class__ is not DepPriority:
1071                         return False
1072                 if priority.optional:
1073                         return True
1074                 if not priority.satisfied:
1075                         return False
1076                 return not priority.buildtime
1077
1078         @classmethod
1079         def _ignore_satisfied_buildtime(cls, priority):
1080                 if priority.__class__ is not DepPriority:
1081                         return False
1082                 if priority.optional:
1083                         return True
1084                 if not priority.satisfied:
1085                         return False
1086                 if priority.buildtime:
1087                         return not priority.rebuild
1088                 return True
1089
1090         @classmethod
1091         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1092                 if priority.__class__ is not DepPriority:
1093                         return False
1094                 if priority.optional:
1095                         return True
1096                 return bool(priority.satisfied)
1097
1098         @classmethod
1099         def _ignore_runtime_post(cls, priority):
1100                 if priority.__class__ is not DepPriority:
1101                         return False
1102                 return bool(priority.optional or \
1103                         priority.satisfied or \
1104                         priority.runtime_post)
1105
1106         @classmethod
1107         def _ignore_runtime(cls, priority):
1108                 if priority.__class__ is not DepPriority:
1109                         return False
1110                 return bool(priority.satisfied or \
1111                         not priority.buildtime)
1112
1113         ignore_medium      = _ignore_runtime
1114         ignore_medium_soft = _ignore_runtime_post
1115         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1116
1117 DepPrioritySatisfiedRange.ignore_priority = (
1118         None,
1119         DepPrioritySatisfiedRange._ignore_optional,
1120         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1121         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1122         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1123         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1124         DepPrioritySatisfiedRange._ignore_runtime_post,
1125         DepPrioritySatisfiedRange._ignore_runtime
1126 )
1127
1128 def _find_deep_system_runtime_deps(graph):
1129         deep_system_deps = set()
1130         node_stack = []
1131         for node in graph:
1132                 if not isinstance(node, Package) or \
1133                         node.operation == 'uninstall':
1134                         continue
1135                 if node.root_config.sets['system'].findAtomForPackage(node):
1136                         node_stack.append(node)
1137
1138         def ignore_priority(priority):
1139                 """
1140                 Ignore non-runtime priorities.
1141                 """
1142                 if isinstance(priority, DepPriority) and \
1143                         (priority.runtime or priority.runtime_post):
1144                         return False
1145                 return True
1146
1147         while node_stack:
1148                 node = node_stack.pop()
1149                 if node in deep_system_deps:
1150                         continue
1151                 deep_system_deps.add(node)
1152                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1153                         if not isinstance(child, Package) or \
1154                                 child.operation == 'uninstall':
1155                                 continue
1156                         node_stack.append(child)
1157
1158         return deep_system_deps
1159
1160 class FakeVartree(portage.vartree):
1161         """This implements an in-memory copy of a vartree instance that provides
1162         all the interfaces required for use by the depgraph.  The vardb is locked
1163         during the constructor call just long enough to read a copy of the
1164         installed package information.  This allows the depgraph to do its
1165         dependency calculations without holding a lock on the vardb.  It also
1166         allows things like vardb global updates to be done in memory so that the
1167         user doesn't necessarily need write access to the vardb in cases where
1168         global updates are necessary (updates are performed when necessary if there
1169         is not a matching ebuild in the tree)."""
1170         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1171                 self._root_config = root_config
1172                 if pkg_cache is None:
1173                         pkg_cache = {}
1174                 real_vartree = root_config.trees["vartree"]
1175                 portdb = root_config.trees["porttree"].dbapi
1176                 self.root = real_vartree.root
1177                 self.settings = real_vartree.settings
1178                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1179                 if "_mtime_" not in mykeys:
1180                         mykeys.append("_mtime_")
1181                 self._db_keys = mykeys
1182                 self._pkg_cache = pkg_cache
1183                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1184                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1185                 try:
1186                         # At least the parent needs to exist for the lock file.
1187                         portage.util.ensure_dirs(vdb_path)
1188                 except portage.exception.PortageException:
1189                         pass
1190                 vdb_lock = None
1191                 try:
1192                         if acquire_lock and os.access(vdb_path, os.W_OK):
1193                                 vdb_lock = portage.locks.lockdir(vdb_path)
1194                         real_dbapi = real_vartree.dbapi
1195                         slot_counters = {}
1196                         for cpv in real_dbapi.cpv_all():
1197                                 cache_key = ("installed", self.root, cpv, "nomerge")
1198                                 pkg = self._pkg_cache.get(cache_key)
1199                                 if pkg is not None:
1200                                         metadata = pkg.metadata
1201                                 else:
1202                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1203                                 myslot = metadata["SLOT"]
1204                                 mycp = portage.dep_getkey(cpv)
1205                                 myslot_atom = "%s:%s" % (mycp, myslot)
1206                                 try:
1207                                         mycounter = long(metadata["COUNTER"])
1208                                 except ValueError:
1209                                         mycounter = 0
1210                                         metadata["COUNTER"] = str(mycounter)
1211                                 other_counter = slot_counters.get(myslot_atom, None)
1212                                 if other_counter is not None:
1213                                         if other_counter > mycounter:
1214                                                 continue
1215                                 slot_counters[myslot_atom] = mycounter
1216                                 if pkg is None:
1217                                         pkg = Package(built=True, cpv=cpv,
1218                                                 installed=True, metadata=metadata,
1219                                                 root_config=root_config, type_name="installed")
1220                                 self._pkg_cache[pkg] = pkg
1221                                 self.dbapi.cpv_inject(pkg)
1222                         real_dbapi.flush_cache()
1223                 finally:
1224                         if vdb_lock:
1225                                 portage.locks.unlockdir(vdb_lock)
1226                 # Populate the old-style virtuals using the cached values.
1227                 if not self.settings.treeVirtuals:
1228                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1229                                 portage.getCPFromCPV, self.get_all_provides())
1230
1231                 # Initialize variables needed for lazy cache pulls of the live ebuild
1232                 # metadata.  This ensures that the vardb lock is released ASAP, without
1233                 # being delayed in case cache generation is triggered.
1234                 self._aux_get = self.dbapi.aux_get
1235                 self.dbapi.aux_get = self._aux_get_wrapper
1236                 self._match = self.dbapi.match
1237                 self.dbapi.match = self._match_wrapper
1238                 self._aux_get_history = set()
1239                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1240                 self._portdb = portdb
1241                 self._global_updates = None
1242
1243         def _match_wrapper(self, cpv, use_cache=1):
1244                 """
1245                 Make sure the metadata in Package instances gets updated for any
1246                 cpv that is returned from a match() call, since the metadata can
1247                 be accessed directly from the Package instance instead of via
1248                 aux_get().
1249                 """
1250                 matches = self._match(cpv, use_cache=use_cache)
1251                 for cpv in matches:
1252                         if cpv in self._aux_get_history:
1253                                 continue
1254                         self._aux_get_wrapper(cpv, [])
1255                 return matches
1256
1257         def _aux_get_wrapper(self, pkg, wants):
1258                 if pkg in self._aux_get_history:
1259                         return self._aux_get(pkg, wants)
1260                 self._aux_get_history.add(pkg)
1261                 try:
1262                         # Use the live ebuild metadata if possible.
1263                         live_metadata = dict(izip(self._portdb_keys,
1264                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1265                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1266                                 raise KeyError(pkg)
1267                         self.dbapi.aux_update(pkg, live_metadata)
1268                 except (KeyError, portage.exception.PortageException):
1269                         if self._global_updates is None:
1270                                 self._global_updates = \
1271                                         grab_global_updates(self._portdb.porttree_root)
1272                         perform_global_updates(
1273                                 pkg, self.dbapi, self._global_updates)
1274                 return self._aux_get(pkg, wants)
1275
1276         def sync(self, acquire_lock=1):
1277                 """
1278                 Call this method to synchronize state with the real vardb
1279                 after one or more packages may have been installed or
1280                 uninstalled.
1281                 """
1282                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1283                 try:
1284                         # At least the parent needs to exist for the lock file.
1285                         portage.util.ensure_dirs(vdb_path)
1286                 except portage.exception.PortageException:
1287                         pass
1288                 vdb_lock = None
1289                 try:
1290                         if acquire_lock and os.access(vdb_path, os.W_OK):
1291                                 vdb_lock = portage.locks.lockdir(vdb_path)
1292                         self._sync()
1293                 finally:
1294                         if vdb_lock:
1295                                 portage.locks.unlockdir(vdb_lock)
1296
1297         def _sync(self):
1298
1299                 real_vardb = self._root_config.trees["vartree"].dbapi
1300                 current_cpv_set = frozenset(real_vardb.cpv_all())
1301                 pkg_vardb = self.dbapi
1302                 aux_get_history = self._aux_get_history
1303
1304                 # Remove any packages that have been uninstalled.
1305                 for pkg in list(pkg_vardb):
1306                         if pkg.cpv not in current_cpv_set:
1307                                 pkg_vardb.cpv_remove(pkg)
1308                                 aux_get_history.discard(pkg.cpv)
1309
1310                 # Validate counters and timestamps.
1311                 slot_counters = {}
1312                 root = self.root
1313                 validation_keys = ["COUNTER", "_mtime_"]
1314                 for cpv in current_cpv_set:
1315
1316                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1317                         pkg = pkg_vardb.get(pkg_hash_key)
1318                         if pkg is not None:
1319                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1320                                 try:
1321                                         counter = long(counter)
1322                                 except ValueError:
1323                                         counter = 0
1324
1325                                 if counter != pkg.counter or \
1326                                         mtime != pkg.mtime:
1327                                         pkg_vardb.cpv_remove(pkg)
1328                                         aux_get_history.discard(pkg.cpv)
1329                                         pkg = None
1330
1331                         if pkg is None:
1332                                 pkg = self._pkg(cpv)
1333
1334                         other_counter = slot_counters.get(pkg.slot_atom)
1335                         if other_counter is not None:
1336                                 if other_counter > pkg.counter:
1337                                         continue
1338
1339                         slot_counters[pkg.slot_atom] = pkg.counter
1340                         pkg_vardb.cpv_inject(pkg)
1341
1342                 real_vardb.flush_cache()
1343
1344         def _pkg(self, cpv):
1345                 root_config = self._root_config
1346                 real_vardb = root_config.trees["vartree"].dbapi
1347                 pkg = Package(cpv=cpv, installed=True,
1348                         metadata=izip(self._db_keys,
1349                         real_vardb.aux_get(cpv, self._db_keys)),
1350                         root_config=root_config,
1351                         type_name="installed")
1352
1353                 try:
1354                         mycounter = long(pkg.metadata["COUNTER"])
1355                 except ValueError:
1356                         mycounter = 0
1357                         pkg.metadata["COUNTER"] = str(mycounter)
1358
1359                 return pkg
1360
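# Illustrative sketch, not part of the original module: a FakeVartree is built
# from a RootConfig so that dependency calculations can run against an
# in-memory snapshot instead of holding the real vardb lock. Hypothetical use:
#
#     fake_vartree = FakeVartree(root_config)
#     installed = fake_vartree.dbapi.match("sys-apps/portage")
#     fake_vartree.sync()  # refresh the snapshot after merges/unmerges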
1361 def grab_global_updates(portdir):
1362         from portage.update import grab_updates, parse_updates
1363         updpath = os.path.join(portdir, "profiles", "updates")
1364         try:
1365                 rawupdates = grab_updates(updpath)
1366         except portage.exception.DirectoryNotFound:
1367                 rawupdates = []
1368         upd_commands = []
1369         for mykey, mystat, mycontent in rawupdates:
1370                 commands, errors = parse_updates(mycontent)
1371                 upd_commands.extend(commands)
1372         return upd_commands
1373
1374 def perform_global_updates(mycpv, mydb, mycommands):
1375         from portage.update import update_dbentries
1376         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1377         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1378         updates = update_dbentries(mycommands, aux_dict)
1379         if updates:
1380                 mydb.aux_update(mycpv, updates)
1381
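# Illustrative sketch, not part of the original module: grab_global_updates()
# collects the update commands from $PORTDIR/profiles/updates, and
# perform_global_updates() rewrites the *DEPEND entries of one package in a
# dbapi-like object (the variable names below are assumptions):
#
#     upd_commands = grab_global_updates(portdb.porttree_root)
#     for cpv in fake_vardb.cpv_all():
#         perform_global_updates(cpv, fake_vardb, upd_commands)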
1382 def visible(pkgsettings, pkg):
1383         """
1384         Check if a package is visible. This can raise an InvalidDependString
1385         exception if LICENSE is invalid.
1386         TODO: optionally generate a list of masking reasons
1387         @rtype: Boolean
1388         @returns: True if the package is visible, False otherwise.
1389         """
1390         if not pkg.metadata["SLOT"]:
1391                 return False
1392         if not pkg.installed:
1393                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1394                         return False
1395         eapi = pkg.metadata["EAPI"]
1396         if not portage.eapi_is_supported(eapi):
1397                 return False
1398         if not pkg.installed:
1399                 if portage._eapi_is_deprecated(eapi):
1400                         return False
1401                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1402                         return False
1403         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         try:
1408                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1409                         return False
1410         except portage.exception.InvalidDependString:
1411                 return False
1412         return True
1413
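# Illustrative sketch, not part of the original module: a hypothetical caller
# can pair visible() with get_masking_status() below to report why a
# candidate was rejected:
#
#     if visible(pkgsettings, pkg):
#         candidates.append(pkg)
#     else:
#         mreasons = get_masking_status(pkg, pkgsettings, pkg.root_config)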
1414 def get_masking_status(pkg, pkgsettings, root_config):
1415
1416         mreasons = portage.getmaskingstatus(
1417                 pkg, settings=pkgsettings,
1418                 portdb=root_config.trees["porttree"].dbapi)
1419
1420         if not pkg.installed:
1421                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1422                         mreasons.append("CHOST: %s" % \
1423                                 pkg.metadata["CHOST"])
1424
1425         if not pkg.metadata["SLOT"]:
1426                 mreasons.append("invalid: SLOT is undefined")
1427
1428         return mreasons
1429
1430 def get_mask_info(root_config, cpv, pkgsettings,
1431         db, pkg_type, built, installed, db_keys):
1432         eapi_masked = False
1433         try:
1434                 metadata = dict(izip(db_keys,
1435                         db.aux_get(cpv, db_keys)))
1436         except KeyError:
1437                 metadata = None
1438         if metadata and not built:
1439                 pkgsettings.setcpv(cpv, mydb=metadata)
1440                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1441                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1442         if metadata is None:
1443                 mreasons = ["corruption"]
1444         else:
1445                 eapi = metadata['EAPI']
1446                 if eapi[:1] == '-':
1447                         eapi = eapi[1:]
1448                 if not portage.eapi_is_supported(eapi):
1449                         mreasons = ['EAPI %s' % eapi]
1450                 else:
1451                         pkg = Package(type_name=pkg_type, root_config=root_config,
1452                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1453                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1454         return metadata, mreasons
1455
1456 def show_masked_packages(masked_packages):
1457         shown_licenses = set()
1458         shown_comments = set()
1459         # Maybe there is both an ebuild and a binary. Only
1460         # show one of them to avoid redundant appearance.
1461         shown_cpvs = set()
1462         have_eapi_mask = False
1463         for (root_config, pkgsettings, cpv,
1464                 metadata, mreasons) in masked_packages:
1465                 if cpv in shown_cpvs:
1466                         continue
1467                 shown_cpvs.add(cpv)
1468                 comment, filename = None, None
1469                 if "package.mask" in mreasons:
1470                         comment, filename = \
1471                                 portage.getmaskingreason(
1472                                 cpv, metadata=metadata,
1473                                 settings=pkgsettings,
1474                                 portdb=root_config.trees["porttree"].dbapi,
1475                                 return_location=True)
1476                 missing_licenses = []
1477                 if metadata:
1478                         if not portage.eapi_is_supported(metadata["EAPI"]):
1479                                 have_eapi_mask = True
1480                         try:
1481                                 missing_licenses = \
1482                                         pkgsettings._getMissingLicenses(
1483                                                 cpv, metadata)
1484                         except portage.exception.InvalidDependString:
1485                                 # This will have already been reported
1486                                 # above via mreasons.
1487                                 pass
1488
1489                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1490                 if comment and comment not in shown_comments:
1491                         print filename+":"
1492                         print comment
1493                         shown_comments.add(comment)
1494                 portdb = root_config.trees["porttree"].dbapi
1495                 for l in missing_licenses:
1496                         l_path = portdb.findLicensePath(l)
1497                         if l in shown_licenses:
1498                                 continue
1499                         msg = ("A copy of the '%s' license" + \
1500                         " is located at '%s'.") % (l, l_path)
1501                         print msg
1502                         print
1503                         shown_licenses.add(l)
1504         return have_eapi_mask
1505
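# Illustrative sketch, not part of the original module: get_mask_info() above
# produces the (metadata, mreasons) pair for one cpv, and show_masked_packages()
# prints one block per cpv, returning True if an unsupported EAPI was among the
# masking reasons (db_keys and masked_packages are assumptions):
#
#     db_keys = Package.metadata_keys
#     metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings,
#         portdb, "ebuild", False, False, db_keys)
#     masked_packages.append(
#         (root_config, pkgsettings, cpv, metadata, mreasons))
#     have_eapi_mask = show_masked_packages(masked_packages)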
1506 class Task(SlotObject):
1507         __slots__ = ("_hash_key", "_hash_value")
1508
1509         def _get_hash_key(self):
1510                 hash_key = getattr(self, "_hash_key", None)
1511                 if hash_key is None:
1512                         raise NotImplementedError(self)
1513                 return hash_key
1514
1515         def __eq__(self, other):
1516                 return self._get_hash_key() == other
1517
1518         def __ne__(self, other):
1519                 return self._get_hash_key() != other
1520
1521         def __hash__(self):
1522                 hash_value = getattr(self, "_hash_value", None)
1523                 if hash_value is None:
1524                         self._hash_value = hash(self._get_hash_key())
1525                 return self._hash_value
1526
1527         def __len__(self):
1528                 return len(self._get_hash_key())
1529
1530         def __getitem__(self, key):
1531                 return self._get_hash_key()[key]
1532
1533         def __iter__(self):
1534                 return iter(self._get_hash_key())
1535
1536         def __contains__(self, key):
1537                 return key in self._get_hash_key()
1538
1539         def __str__(self):
1540                 return str(self._get_hash_key())
1541
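# Illustrative sketch, not part of the original module: Task subclasses only
# need to supply a hash key; equality, hashing, iteration and indexing are all
# derived from it, which is how Blocker and Package below behave like tuples
# in sets and dicts. A hypothetical minimal subclass:
#
#     class UnmergeTask(Task):
#         __slots__ = ("root", "cpv")
#         def _get_hash_key(self):
#             hash_key = getattr(self, "_hash_key", None)
#             if hash_key is None:
#                 self._hash_key = ("unmerge", self.root, self.cpv)
#             return self._hash_key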
1542 class Blocker(Task):
1543
1544         __hash__ = Task.__hash__
1545         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1546
1547         def __init__(self, **kwargs):
1548                 Task.__init__(self, **kwargs)
1549                 self.cp = portage.dep_getkey(self.atom)
1550
1551         def _get_hash_key(self):
1552                 hash_key = getattr(self, "_hash_key", None)
1553                 if hash_key is None:
1554                         self._hash_key = \
1555                                 ("blocks", self.root, self.atom, self.eapi)
1556                 return self._hash_key
1557
1558 class Package(Task):
1559
1560         __hash__ = Task.__hash__
1561         __slots__ = ("built", "cpv", "depth",
1562                 "installed", "metadata", "onlydeps", "operation",
1563                 "root_config", "type_name",
1564                 "category", "counter", "cp", "cpv_split",
1565                 "inherited", "iuse", "mtime",
1566                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1567
1568         metadata_keys = [
1569                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1570                 "INHERITED", "IUSE", "KEYWORDS",
1571                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1572                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1573
1574         def __init__(self, **kwargs):
1575                 Task.__init__(self, **kwargs)
1576                 self.root = self.root_config.root
1577                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1578                 self.cp = portage.cpv_getkey(self.cpv)
1579                 slot = self.slot
1580                 if not slot:
1581                         # Avoid an InvalidAtom exception when creating slot_atom.
1582                         # This package instance will be masked due to empty SLOT.
1583                         slot = '0'
1584                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1585                 self.category, self.pf = portage.catsplit(self.cpv)
1586                 self.cpv_split = portage.catpkgsplit(self.cpv)
1587                 self.pv_split = self.cpv_split[1:]
1588
1589         class _use(object):
1590
1591                 __slots__ = ("__weakref__", "enabled")
1592
1593                 def __init__(self, use):
1594                         self.enabled = frozenset(use)
1595
1596         class _iuse(object):
1597
1598                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1599
1600                 def __init__(self, tokens, iuse_implicit):
1601                         self.tokens = tuple(tokens)
1602                         self.iuse_implicit = iuse_implicit
1603                         enabled = []
1604                         disabled = []
1605                         other = []
1606                         for x in tokens:
1607                                 prefix = x[:1]
1608                                 if prefix == "+":
1609                                         enabled.append(x[1:])
1610                                 elif prefix == "-":
1611                                         disabled.append(x[1:])
1612                                 else:
1613                                         other.append(x)
1614                         self.enabled = frozenset(enabled)
1615                         self.disabled = frozenset(disabled)
1616                         self.all = frozenset(chain(enabled, disabled, other))
1617
1618                 def __getattribute__(self, name):
1619                         if name == "regex":
1620                                 try:
1621                                         return object.__getattribute__(self, "regex")
1622                                 except AttributeError:
1623                                         all = object.__getattribute__(self, "all")
1624                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1625                                         # Escape anything except ".*" which is supposed
1626                                         # to pass through from _get_implicit_iuse()
1627                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1628                                         regex = "^(%s)$" % "|".join(regex)
1629                                         regex = regex.replace("\\.\\*", ".*")
1630                                         self.regex = re.compile(regex)
1631                         return object.__getattribute__(self, name)
1632
1633         def _get_hash_key(self):
1634                 hash_key = getattr(self, "_hash_key", None)
1635                 if hash_key is None:
1636                         if self.operation is None:
1637                                 self.operation = "merge"
1638                                 if self.onlydeps or self.installed:
1639                                         self.operation = "nomerge"
1640                         self._hash_key = \
1641                                 (self.type_name, self.root, self.cpv, self.operation)
1642                 return self._hash_key
1643
1644         def __lt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1648                         return True
1649                 return False
1650
1651         def __le__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1655                         return True
1656                 return False
1657
1658         def __gt__(self, other):
1659                 if other.cp != self.cp:
1660                         return False
1661                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1662                         return True
1663                 return False
1664
1665         def __ge__(self, other):
1666                 if other.cp != self.cp:
1667                         return False
1668                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1669                         return True
1670                 return False
1671
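# Illustrative note, not part of the original module: the rich comparisons
# above only order packages that share the same category/package name; for
# differing cp values every comparison returns False. For example, with
# hypothetical instances pkg_a and pkg_b of different packages, both
# pkg_a < pkg_b and pkg_a > pkg_b evaluate to False.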
1672 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1673         if not x.startswith("UNUSED_"))
1674 _all_metadata_keys.discard("CDEPEND")
1675 _all_metadata_keys.update(Package.metadata_keys)
1676
1677 from portage.cache.mappings import slot_dict_class
1678 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1679
1680 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1681         """
1682         Detect metadata updates and synchronize Package attributes.
1683         """
1684
1685         __slots__ = ("_pkg",)
1686         _wrapped_keys = frozenset(
1687                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1688
1689         def __init__(self, pkg, metadata):
1690                 _PackageMetadataWrapperBase.__init__(self)
1691                 self._pkg = pkg
1692                 self.update(metadata)
1693
1694         def __setitem__(self, k, v):
1695                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1696                 if k in self._wrapped_keys:
1697                         getattr(self, "_set_" + k.lower())(k, v)
1698
1699         def _set_inherited(self, k, v):
1700                 if isinstance(v, basestring):
1701                         v = frozenset(v.split())
1702                 self._pkg.inherited = v
1703
1704         def _set_iuse(self, k, v):
1705                 self._pkg.iuse = self._pkg._iuse(
1706                         v.split(), self._pkg.root_config.iuse_implicit)
1707
1708         def _set_slot(self, k, v):
1709                 self._pkg.slot = v
1710
1711         def _set_use(self, k, v):
1712                 self._pkg.use = self._pkg._use(v.split())
1713
1714         def _set_counter(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.counter = v
1721
1722         def _set__mtime_(self, k, v):
1723                 if isinstance(v, basestring):
1724                         try:
1725                                 v = long(v.strip())
1726                         except ValueError:
1727                                 v = 0
1728                 self._pkg.mtime = v
1729
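# Illustrative sketch, not part of the original module: assigning one of the
# wrapped keys through the metadata wrapper keeps the owning Package instance
# in sync (hypothetical pkg):
#
#     pkg.metadata["SLOT"] = "2"      # also sets pkg.slot = "2"
#     pkg.metadata["COUNTER"] = "17"  # also sets pkg.counter = 17L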
1730 class EbuildFetchonly(SlotObject):
1731
1732         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1733
1734         def execute(self):
1735                 settings = self.settings
1736                 pkg = self.pkg
1737                 portdb = pkg.root_config.trees["porttree"].dbapi
1738                 ebuild_path = portdb.findname(pkg.cpv)
1739                 settings.setcpv(pkg)
1740                 debug = settings.get("PORTAGE_DEBUG") == "1"
1741                 use_cache = 1 # always true
1742                 portage.doebuild_environment(ebuild_path, "fetch",
1743                         settings["ROOT"], settings, debug, use_cache, portdb)
1744                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1745
1746                 if restrict_fetch:
1747                         rval = self._execute_with_builddir()
1748                 else:
1749                         rval = portage.doebuild(ebuild_path, "fetch",
1750                                 settings["ROOT"], settings, debug=debug,
1751                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1752                                 mydbapi=portdb, tree="porttree")
1753
1754                         if rval != os.EX_OK:
1755                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1756                                 eerror(msg, phase="unpack", key=pkg.cpv)
1757
1758                 return rval
1759
1760         def _execute_with_builddir(self):
1761                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR for
1762                 # ensuring sane $PWD (bug #239560) and storing elog
1763                 # messages. Use a private temp directory, in order
1764                 # to avoid locking the main one.
1765                 settings = self.settings
1766                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1767                 from tempfile import mkdtemp
1768                 try:
1769                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1770                 except OSError, e:
1771                         if e.errno != portage.exception.PermissionDenied.errno:
1772                                 raise
1773                         raise portage.exception.PermissionDenied(global_tmpdir)
1774                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1775                 settings.backup_changes("PORTAGE_TMPDIR")
1776                 try:
1777                         retval = self._execute()
1778                 finally:
1779                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1780                         settings.backup_changes("PORTAGE_TMPDIR")
1781                         shutil.rmtree(private_tmpdir)
1782                 return retval
1783
1784         def _execute(self):
1785                 settings = self.settings
1786                 pkg = self.pkg
1787                 root_config = pkg.root_config
1788                 portdb = root_config.trees["porttree"].dbapi
1789                 ebuild_path = portdb.findname(pkg.cpv)
1790                 debug = settings.get("PORTAGE_DEBUG") == "1"
1791                 retval = portage.doebuild(ebuild_path, "fetch",
1792                         self.settings["ROOT"], self.settings, debug=debug,
1793                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1794                         mydbapi=portdb, tree="porttree")
1795
1796                 if retval != os.EX_OK:
1797                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1798                         eerror(msg, phase="unpack", key=pkg.cpv)
1799
1800                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1801                 return retval
1802
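# Illustrative sketch, not part of the original module: EbuildFetchonly runs
# the "fetch" phase directly, and only sets up a private build directory when
# RESTRICT=fetch applies (so that pkg_nofetch can be spawned). Hypothetical:
#
#     task = EbuildFetchonly(fetch_all=False, pkg=pkg,
#         pretend=pretend, settings=settings)
#     rval = task.execute()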
1803 class PollConstants(object):
1804
1805         """
1806         Provides POLL* constants that are equivalent to those from the
1807         select module, for use by PollSelectAdapter.
1808         """
1809
1810         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1811         v = 1
1812         for k in names:
1813                 locals()[k] = getattr(select, k, v)
1814                 v *= 2
1815         del k, v
1816
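# Illustrative note, not part of the original module: on platforms where the
# select module does not provide poll(), the attributes above fall back to
# unique power-of-two values, so bitwise tests such as
# "event & PollConstants.POLLIN" behave the same whether or not the real
# select.POLLIN constant exists.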
1817 class AsynchronousTask(SlotObject):
1818         """
1819         Subclasses override _wait() and _poll() so that calls
1820         to public methods can be wrapped for implementing
1821         hooks such as exit listener notification.
1822
1823         Subclasses should call self.wait() to notify exit listeners after
1824         the task is complete and self.returncode has been set.
1825         """
1826
1827         __slots__ = ("background", "cancelled", "returncode") + \
1828                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1829
1830         def start(self):
1831                 """
1832                 Start an asynchronous task and then return as soon as possible.
1833                 """
1834                 self._start_hook()
1835                 self._start()
1836
1837         def _start(self):
1838                 raise NotImplementedError(self)
1839
1840         def isAlive(self):
1841                 return self.returncode is None
1842
1843         def poll(self):
1844                 self._wait_hook()
1845                 return self._poll()
1846
1847         def _poll(self):
1848                 return self.returncode
1849
1850         def wait(self):
1851                 if self.returncode is None:
1852                         self._wait()
1853                 self._wait_hook()
1854                 return self.returncode
1855
1856         def _wait(self):
1857                 return self.returncode
1858
1859         def cancel(self):
1860                 self.cancelled = True
1861                 self.wait()
1862
1863         def addStartListener(self, f):
1864                 """
1865                 The function will be called with one argument, a reference to self.
1866                 """
1867                 if self._start_listeners is None:
1868                         self._start_listeners = []
1869                 self._start_listeners.append(f)
1870
1871         def removeStartListener(self, f):
1872                 if self._start_listeners is None:
1873                         return
1874                 self._start_listeners.remove(f)
1875
1876         def _start_hook(self):
1877                 if self._start_listeners is not None:
1878                         start_listeners = self._start_listeners
1879                         self._start_listeners = None
1880
1881                         for f in start_listeners:
1882                                 f(self)
1883
1884         def addExitListener(self, f):
1885                 """
1886                 The function will be called with one argument, a reference to self.
1887                 """
1888                 if self._exit_listeners is None:
1889                         self._exit_listeners = []
1890                 self._exit_listeners.append(f)
1891
1892         def removeExitListener(self, f):
1893                 if self._exit_listeners is None:
1894                         if self._exit_listener_stack is not None:
1895                                 self._exit_listener_stack.remove(f)
1896                         return
1897                 self._exit_listeners.remove(f)
1898
1899         def _wait_hook(self):
1900                 """
1901                 Call this method after the task completes, just before returning
1902                 the returncode from wait() or poll(). This hook is
1903                 used to trigger exit listeners when the returncode first
1904                 becomes available.
1905                 """
1906                 if self.returncode is not None and \
1907                         self._exit_listeners is not None:
1908
1909                         # This prevents recursion, in case one of the
1910                         # exit handlers triggers this method again by
1911                         # calling wait(). Use a stack that gives
1912                         # removeExitListener() an opportunity to consume
1913                         # listeners from the stack, before they can get
1914                         # called below. This is necessary because a call
1915                         # to one exit listener may result in a call to
1916                         # removeExitListener() for another listener on
1917                         # the stack. That listener needs to be removed
1918                         # from the stack since it would be inconsistent
1919                         # to call it after it has been passed into
1920                         # removeExitListener().
1921                         self._exit_listener_stack = self._exit_listeners
1922                         self._exit_listeners = None
1923
1924                         self._exit_listener_stack.reverse()
1925                         while self._exit_listener_stack:
1926                                 self._exit_listener_stack.pop()(self)
1927
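# Illustrative sketch, not part of the original module: a hypothetical
# subclass that completes immediately and notifies its exit listeners:
#
#     class NoopTask(AsynchronousTask):
#         __slots__ = ()
#         def _start(self):
#             self.returncode = os.EX_OK
#             self.wait()  # triggers _wait_hook() and the exit listeners
#
#     task = NoopTask()
#     task.addExitListener(lambda t: writemsg("done: %s\n" % t.returncode))
#     task.start()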
1928 class AbstractPollTask(AsynchronousTask):
1929
1930         __slots__ = ("scheduler",) + \
1931                 ("_registered",)
1932
1933         _bufsize = 4096
1934         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1935         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1936                 _exceptional_events
1937
1938         def _unregister(self):
1939                 raise NotImplementedError(self)
1940
1941         def _unregister_if_appropriate(self, event):
1942                 if self._registered:
1943                         if event & self._exceptional_events:
1944                                 self._unregister()
1945                                 self.cancel()
1946                         elif event & PollConstants.POLLHUP:
1947                                 self._unregister()
1948                                 self.wait()
1949
1950 class PipeReader(AbstractPollTask):
1951
1952         """
1953         Reads output from one or more files and saves it in memory,
1954         for retrieval via the getvalue() method. This is driven by
1955         the scheduler's poll() loop, so it runs entirely within the
1956         current process.
1957         """
1958
1959         __slots__ = ("input_files",) + \
1960                 ("_read_data", "_reg_ids")
1961
1962         def _start(self):
1963                 self._reg_ids = set()
1964                 self._read_data = []
1965                 for k, f in self.input_files.iteritems():
1966                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1967                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1968                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1969                                 self._registered_events, self._output_handler))
1970                 self._registered = True
1971
1972         def isAlive(self):
1973                 return self._registered
1974
1975         def cancel(self):
1976                 if self.returncode is None:
1977                         self.returncode = 1
1978                         self.cancelled = True
1979                 self.wait()
1980
1981         def _wait(self):
1982                 if self.returncode is not None:
1983                         return self.returncode
1984
1985                 if self._registered:
1986                         self.scheduler.schedule(self._reg_ids)
1987                         self._unregister()
1988
1989                 self.returncode = os.EX_OK
1990                 return self.returncode
1991
1992         def getvalue(self):
1993                 """Retrieve the entire contents"""
1994                 if sys.hexversion >= 0x3000000:
1995                         return bytes().join(self._read_data)
1996                 return "".join(self._read_data)
1997
1998         def close(self):
1999                 """Free the memory buffer."""
2000                 self._read_data = None
2001
2002         def _output_handler(self, fd, event):
2003
2004                 if event & PollConstants.POLLIN:
2005
2006                         for f in self.input_files.itervalues():
2007                                 if fd == f.fileno():
2008                                         break
2009
2010                         buf = array.array('B')
2011                         try:
2012                                 buf.fromfile(f, self._bufsize)
2013                         except EOFError:
2014                                 pass
2015
2016                         if buf:
2017                                 self._read_data.append(buf.tostring())
2018                         else:
2019                                 self._unregister()
2020                                 self.wait()
2021
2022                 self._unregister_if_appropriate(event)
2023                 return self._registered
2024
2025         def _unregister(self):
2026                 """
2027                 Unregister from the scheduler and close open files.
2028                 """
2029
2030                 self._registered = False
2031
2032                 if self._reg_ids is not None:
2033                         for reg_id in self._reg_ids:
2034                                 self.scheduler.unregister(reg_id)
2035                         self._reg_ids = None
2036
2037                 if self.input_files is not None:
2038                         for f in self.input_files.itervalues():
2039                                 f.close()
2040                         self.input_files = None
2041
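# Illustrative sketch, not part of the original module: a PipeReader is driven
# by a scheduler's poll() loop (scheduler and the writer are assumptions):
#
#     master_fd, slave_fd = os.pipe()
#     reader = PipeReader(input_files={"pipe_in": os.fdopen(master_fd, 'rb')},
#         scheduler=scheduler)
#     reader.start()
#     # ... the writer sends data to slave_fd and then closes it ...
#     reader.wait()             # blocks in scheduler.schedule() until EOF
#     output = reader.getvalue()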
2042 class CompositeTask(AsynchronousTask):
2043
2044         __slots__ = ("scheduler",) + ("_current_task",)
2045
2046         def isAlive(self):
2047                 return self._current_task is not None
2048
2049         def cancel(self):
2050                 self.cancelled = True
2051                 if self._current_task is not None:
2052                         self._current_task.cancel()
2053
2054         def _poll(self):
2055                 """
2056                 This does a loop calling self._current_task.poll()
2057                 repeatedly as long as the value of self._current_task
2058                 keeps changing. It calls poll() a maximum of one time
2059                 for a given self._current_task instance. This is useful
2060                 since calling poll() on a task can trigger advancement to
2061                 the next task, which could eventually lead to the returncode
2062                 being set in cases when polling only a single task would
2063                 not have the same effect.
2064                 """
2065
2066                 prev = None
2067                 while True:
2068                         task = self._current_task
2069                         if task is None or task is prev:
2070                                 # don't poll the same task more than once
2071                                 break
2072                         task.poll()
2073                         prev = task
2074
2075                 return self.returncode
2076
2077         def _wait(self):
2078
2079                 prev = None
2080                 while True:
2081                         task = self._current_task
2082                         if task is None:
2083                                 # don't wait for the same task more than once
2084                                 break
2085                         if task is prev:
2086                                 # Before the task.wait() method returned, an exit
2087                                 # listener should have set self._current_task to either
2088                                 # a different task or None. Something is wrong.
2089                                 raise AssertionError("self._current_task has not " + \
2090                                         "changed since calling wait", self, task)
2091                         task.wait()
2092                         prev = task
2093
2094                 return self.returncode
2095
2096         def _assert_current(self, task):
2097                 """
2098                 Raises an AssertionError if the given task is not the
2099                 same one as self._current_task. This can be useful
2100                 for detecting bugs.
2101                 """
2102                 if task is not self._current_task:
2103                         raise AssertionError("Unrecognized task: %s" % (task,))
2104
2105         def _default_exit(self, task):
2106                 """
2107                 Calls _assert_current() on the given task and then sets the
2108                 composite returncode attribute if task.returncode != os.EX_OK.
2109                 If the task failed then self._current_task will be set to None.
2110                 Subclasses can use this as a generic task exit callback.
2111
2112                 @rtype: int
2113                 @returns: The task.returncode attribute.
2114                 """
2115                 self._assert_current(task)
2116                 if task.returncode != os.EX_OK:
2117                         self.returncode = task.returncode
2118                         self._current_task = None
2119                 return task.returncode
2120
2121         def _final_exit(self, task):
2122                 """
2123                 Assumes that task is the final task of this composite task.
2124                 Calls _default_exit() and sets self.returncode to the task's
2125                 returncode and sets self._current_task to None.
2126                 """
2127                 self._default_exit(task)
2128                 self._current_task = None
2129                 self.returncode = task.returncode
2130                 return self.returncode
2131
2132         def _default_final_exit(self, task):
2133                 """
2134                 This calls _final_exit() and then wait().
2135
2136                 Subclasses can use this as a generic final task exit callback.
2137
2138                 """
2139                 self._final_exit(task)
2140                 return self.wait()
2141
2142         def _start_task(self, task, exit_handler):
2143                 """
2144                 Register exit handler for the given task, set it
2145                 as self._current_task, and call task.start().
2146
2147                 Subclasses can use this as a generic way to start
2148                 a task.
2149
2150                 """
2151                 task.addExitListener(exit_handler)
2152                 self._current_task = task
2153                 task.start()
2154
2155 class TaskSequence(CompositeTask):
2156         """
2157         A collection of tasks that executes sequentially. Each task
2158         must have an addExitListener() method that can be used as
2159         a means to trigger movement from one task to the next.
2160         """
2161
2162         __slots__ = ("_task_queue",)
2163
2164         def __init__(self, **kwargs):
2165                 AsynchronousTask.__init__(self, **kwargs)
2166                 self._task_queue = deque()
2167
2168         def add(self, task):
2169                 self._task_queue.append(task)
2170
2171         def _start(self):
2172                 self._start_next_task()
2173
2174         def cancel(self):
2175                 self._task_queue.clear()
2176                 CompositeTask.cancel(self)
2177
2178         def _start_next_task(self):
2179                 self._start_task(self._task_queue.popleft(),
2180                         self._task_exit_handler)
2181
2182         def _task_exit_handler(self, task):
2183                 if self._default_exit(task) != os.EX_OK:
2184                         self.wait()
2185                 elif self._task_queue:
2186                         self._start_next_task()
2187                 else:
2188                         self._final_exit(task)
2189                         self.wait()
2190
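# Illustrative sketch, not part of the original module: tasks added to a
# TaskSequence run one after another, and the sequence stops early when a task
# exits unsuccessfully (fetch_task and build_task are hypothetical):
#
#     seq = TaskSequence(scheduler=scheduler)
#     seq.add(fetch_task)
#     seq.add(build_task)
#     seq.start()
#     seq.wait()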
2191 class SubProcess(AbstractPollTask):
2192
2193         __slots__ = ("pid",) + \
2194                 ("_files", "_reg_id")
2195
2196         # A file descriptor is required for the scheduler to monitor changes from
2197         # inside a poll() loop. When logging is not enabled, create a pipe just to
2198         # serve this purpose alone.
2199         _dummy_pipe_fd = 9
2200
2201         def _poll(self):
2202                 if self.returncode is not None:
2203                         return self.returncode
2204                 if self.pid is None:
2205                         return self.returncode
2206                 if self._registered:
2207                         return self.returncode
2208
2209                 try:
2210                         retval = os.waitpid(self.pid, os.WNOHANG)
2211                 except OSError, e:
2212                         if e.errno != errno.ECHILD:
2213                                 raise
2214                         del e
2215                         retval = (self.pid, 1)
2216
2217                 if retval == (0, 0):
2218                         return None
2219                 self._set_returncode(retval)
2220                 return self.returncode
2221
2222         def cancel(self):
2223                 if self.isAlive():
2224                         try:
2225                                 os.kill(self.pid, signal.SIGTERM)
2226                         except OSError, e:
2227                                 if e.errno != errno.ESRCH:
2228                                         raise
2229                                 del e
2230
2231                 self.cancelled = True
2232                 if self.pid is not None:
2233                         self.wait()
2234                 return self.returncode
2235
2236         def isAlive(self):
2237                 return self.pid is not None and \
2238                         self.returncode is None
2239
2240         def _wait(self):
2241
2242                 if self.returncode is not None:
2243                         return self.returncode
2244
2245                 if self._registered:
2246                         self.scheduler.schedule(self._reg_id)
2247                         self._unregister()
2248                         if self.returncode is not None:
2249                                 return self.returncode
2250
2251                 try:
2252                         wait_retval = os.waitpid(self.pid, 0)
2253                 except OSError, e:
2254                         if e.errno != errno.ECHILD:
2255                                 raise
2256                         del e
2257                         self._set_returncode((self.pid, 1))
2258                 else:
2259                         self._set_returncode(wait_retval)
2260
2261                 return self.returncode
2262
2263         def _unregister(self):
2264                 """
2265                 Unregister from the scheduler and close open files.
2266                 """
2267
2268                 self._registered = False
2269
2270                 if self._reg_id is not None:
2271                         self.scheduler.unregister(self._reg_id)
2272                         self._reg_id = None
2273
2274                 if self._files is not None:
2275                         for f in self._files.itervalues():
2276                                 f.close()
2277                         self._files = None
2278
2279         def _set_returncode(self, wait_retval):
2280
2281                 retval = wait_retval[1]
2282
2283                 if retval != os.EX_OK:
2284                         if retval & 0xff:
2285                                 retval = (retval & 0xff) << 8
2286                         else:
2287                                 retval = retval >> 8
2288
2289                 self.returncode = retval
2290
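# Illustrative note, not part of the original module: _set_returncode() above
# normalizes the status word from os.waitpid(). A child that exits with status
# 1 is reported as 0x0100, so (retval & 0xff) is 0 and the shift yields 1; a
# child killed by SIGKILL is reported as 0x0009, so (retval & 0xff) is 9 and
# the result becomes 9 << 8 = 2304, keeping signal deaths distinct from clean
# exits.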
2291 class SpawnProcess(SubProcess):
2292
2293         """
2294         Constructor keyword args are passed into portage.process.spawn().
2295         The required "args" keyword argument will be passed as the first
2296         spawn() argument.
2297         """
2298
2299         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2300                 "uid", "gid", "groups", "umask", "logfile",
2301                 "path_lookup", "pre_exec")
2302
2303         __slots__ = ("args",) + \
2304                 _spawn_kwarg_names
2305
2306         _file_names = ("log", "process", "stdout")
2307         _files_dict = slot_dict_class(_file_names, prefix="")
2308
2309         def _start(self):
2310
2311                 if self.cancelled:
2312                         return
2313
2314                 if self.fd_pipes is None:
2315                         self.fd_pipes = {}
2316                 fd_pipes = self.fd_pipes
2317                 fd_pipes.setdefault(0, sys.stdin.fileno())
2318                 fd_pipes.setdefault(1, sys.stdout.fileno())
2319                 fd_pipes.setdefault(2, sys.stderr.fileno())
2320
2321                 # flush any pending output
2322                 for fd in fd_pipes.itervalues():
2323                         if fd == sys.stdout.fileno():
2324                                 sys.stdout.flush()
2325                         if fd == sys.stderr.fileno():
2326                                 sys.stderr.flush()
2327
2328                 logfile = self.logfile
2329                 self._files = self._files_dict()
2330                 files = self._files
2331
2332                 master_fd, slave_fd = self._pipe(fd_pipes)
2333                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2334                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2335
2336                 null_input = None
2337                 fd_pipes_orig = fd_pipes.copy()
2338                 if self.background:
2339                         # TODO: Use job control functions like tcsetpgrp() to control
2340                         # access to stdin. Until then, use /dev/null so that any
2341                         # attempts to read from stdin will immediately return EOF
2342                         # instead of blocking indefinitely.
2343                         null_input = open('/dev/null', 'rb')
2344                         fd_pipes[0] = null_input.fileno()
2345                 else:
2346                         fd_pipes[0] = fd_pipes_orig[0]
2347
2348                 files.process = os.fdopen(master_fd, 'rb')
2349                 if logfile is not None:
2350
2351                         fd_pipes[1] = slave_fd
2352                         fd_pipes[2] = slave_fd
2353
2354                         files.log = open(logfile, mode='ab')
2355                         portage.util.apply_secpass_permissions(logfile,
2356                                 uid=portage.portage_uid, gid=portage.portage_gid,
2357                                 mode=0660)
2358
2359                         if not self.background:
2360                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2361
2362                         output_handler = self._output_handler
2363
2364                 else:
2365
2366                         # Create a dummy pipe so the scheduler can monitor
2367                         # the process from inside a poll() loop.
2368                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2369                         if self.background:
2370                                 fd_pipes[1] = slave_fd
2371                                 fd_pipes[2] = slave_fd
2372                         output_handler = self._dummy_handler
2373
2374                 kwargs = {}
2375                 for k in self._spawn_kwarg_names:
2376                         v = getattr(self, k)
2377                         if v is not None:
2378                                 kwargs[k] = v
2379
2380                 kwargs["fd_pipes"] = fd_pipes
2381                 kwargs["returnpid"] = True
2382                 kwargs.pop("logfile", None)
2383
2384                 self._reg_id = self.scheduler.register(files.process.fileno(),
2385                         self._registered_events, output_handler)
2386                 self._registered = True
2387
2388                 retval = self._spawn(self.args, **kwargs)
2389
2390                 os.close(slave_fd)
2391                 if null_input is not None:
2392                         null_input.close()
2393
2394                 if isinstance(retval, int):
2395                         # spawn failed
2396                         self._unregister()
2397                         self.returncode = retval
2398                         self.wait()
2399                         return
2400
2401                 self.pid = retval[0]
2402                 portage.process.spawned_pids.remove(self.pid)
2403
2404         def _pipe(self, fd_pipes):
2405                 """
2406                 @type fd_pipes: dict
2407                 @param fd_pipes: pipes from which to copy terminal size if desired.
2408                 """
2409                 return os.pipe()
2410
2411         def _spawn(self, args, **kwargs):
2412                 return portage.process.spawn(args, **kwargs)
2413
2414         def _output_handler(self, fd, event):
2415
2416                 if event & PollConstants.POLLIN:
2417
2418                         files = self._files
2419                         buf = array.array('B')
2420                         try:
2421                                 buf.fromfile(files.process, self._bufsize)
2422                         except EOFError:
2423                                 pass
2424
2425                         if buf:
2426                                 if not self.background:
2427                                         buf.tofile(files.stdout)
2428                                         files.stdout.flush()
2429                                 buf.tofile(files.log)
2430                                 files.log.flush()
2431                         else:
2432                                 self._unregister()
2433                                 self.wait()
2434
2435                 self._unregister_if_appropriate(event)
2436                 return self._registered
2437
2438         def _dummy_handler(self, fd, event):
2439                 """
2440                 This method is mainly interested in detecting EOF, since
2441                 the only purpose of the pipe is to allow the scheduler to
2442                 monitor the process from inside a poll() loop.
2443                 """
2444
2445                 if event & PollConstants.POLLIN:
2446
2447                         buf = array.array('B')
2448                         try:
2449                                 buf.fromfile(self._files.process, self._bufsize)
2450                         except EOFError:
2451                                 pass
2452
2453                         if buf:
2454                                 pass
2455                         else:
2456                                 self._unregister()
2457                                 self.wait()
2458
2459                 self._unregister_if_appropriate(event)
2460                 return self._registered
2461
2462 class MiscFunctionsProcess(SpawnProcess):
2463         """
2464         Spawns misc-functions.sh with an existing ebuild environment.
2465         """
2466
2467         __slots__ = ("commands", "phase", "pkg", "settings")
2468
2469         def _start(self):
2470                 settings = self.settings
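                     # misc-functions.sh is not an ebuild phase, so clear any
                     # EBUILD_PHASE value from the config before spawning it.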
2471                 settings.pop("EBUILD_PHASE", None)
2472                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2473                 misc_sh_binary = os.path.join(portage_bin_path,
2474                         os.path.basename(portage.const.MISC_SH_BINARY))
2475
2476                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2477                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2478
2479                 portage._doebuild_exit_status_unlink(
2480                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2481
2482                 SpawnProcess._start(self)
2483
2484         def _spawn(self, args, **kwargs):
2485                 settings = self.settings
2486                 debug = settings.get("PORTAGE_DEBUG") == "1"
2487                 return portage.spawn(" ".join(args), settings,
2488                         debug=debug, **kwargs)
2489
2490         def _set_returncode(self, wait_retval):
2491                 SpawnProcess._set_returncode(self, wait_retval)
2492                 self.returncode = portage._doebuild_exit_status_check_and_log(
2493                         self.settings, self.phase, self.returncode)
2494
2495 class EbuildFetcher(SpawnProcess):
2496
2497         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2498                 ("_build_dir",)
2499
2500         def _start(self):
2501
2502                 root_config = self.pkg.root_config
2503                 portdb = root_config.trees["porttree"].dbapi
2504                 ebuild_path = portdb.findname(self.pkg.cpv)
2505                 settings = self.config_pool.allocate()
2506                 settings.setcpv(self.pkg)
2507
2508                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2509                 # should not be touched since otherwise it could interfere with
2510                 # another instance of the same cpv concurrently being built for a
2511                 # different $ROOT (currently, builds only cooperate with prefetchers
2512                 # that are spawned for the same $ROOT).
2513                 if not self.prefetch:
2514                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2515                         self._build_dir.lock()
2516                         self._build_dir.clean_log()
2517                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2518                         if self.logfile is None:
2519                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2520
2521                 phase = "fetch"
2522                 if self.fetchall:
2523                         phase = "fetchall"
2524
2525                 # If any incremental variables have been overridden
2526                 # via the environment, those values need to be passed
2527                 # along here so that they are correctly considered by
2528                 # the config instance in the subprocess.
2529                 fetch_env = os.environ.copy()
2530
2531                 nocolor = settings.get("NOCOLOR")
2532                 if nocolor is not None:
2533                         fetch_env["NOCOLOR"] = nocolor
2534
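                     # PORTAGE_NICENESS is forced to "0" here, presumably so the
                     # child ebuild process does not renice itself again on top
                     # of the niceness already applied to this process.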
2535                 fetch_env["PORTAGE_NICENESS"] = "0"
2536                 if self.prefetch:
2537                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2538
2539                 ebuild_binary = os.path.join(
2540                         settings["PORTAGE_BIN_PATH"], "ebuild")
2541
2542                 fetch_args = [ebuild_binary, ebuild_path, phase]
2543                 debug = settings.get("PORTAGE_DEBUG") == "1"
2544                 if debug:
2545                         fetch_args.append("--debug")
2546
2547                 self.args = fetch_args
2548                 self.env = fetch_env
2549                 SpawnProcess._start(self)
2550
2551         def _pipe(self, fd_pipes):
2552                 """When appropriate, use a pty so that fetcher progress bars,
2553                 like the ones wget displays, work properly."""
2554                 if self.background or not sys.stdout.isatty():
2555                         # When the output only goes to a log file,
2556                         # there's no point in creating a pty.
2557                         return os.pipe()
2558                 stdout_pipe = fd_pipes.get(1)
2559                 got_pty, master_fd, slave_fd = \
2560                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2561                 return (master_fd, slave_fd)
2562
2563         def _set_returncode(self, wait_retval):
2564                 SpawnProcess._set_returncode(self, wait_retval)
2565                 # Collect elog messages that might have been
2566                 # created by the pkg_nofetch phase.
2567                 if self._build_dir is not None:
2568                         # Skip elog messages for prefetch, in order to avoid duplicates.
2569                         if not self.prefetch and self.returncode != os.EX_OK:
2570                                 elog_out = None
2571                                 if self.logfile is not None:
2572                                         if self.background:
2573                                                 elog_out = open(self.logfile, 'a')
2574                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2575                                 if self.logfile is not None:
2576                                         msg += ", Log file:"
2577                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2578                                 if self.logfile is not None:
2579                                         eerror(" '%s'" % (self.logfile,),
2580                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2581                                 if elog_out is not None:
2582                                         elog_out.close()
2583                         if not self.prefetch:
2584                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2585                         features = self._build_dir.settings.features
2586                         if self.returncode == os.EX_OK:
2587                                 self._build_dir.clean_log()
2588                         self._build_dir.unlock()
2589                         self.config_pool.deallocate(self._build_dir.settings)
2590                         self._build_dir = None
2591
2592 class EbuildBuildDir(SlotObject):
2593
2594         __slots__ = ("dir_path", "pkg", "settings",
2595                 "locked", "_catdir", "_lock_obj")
2596
2597         def __init__(self, **kwargs):
2598                 SlotObject.__init__(self, **kwargs)
2599                 self.locked = False
2600
2601         def lock(self):
2602                 """
2603                 This raises an AlreadyLocked exception if lock() is called
2604                 while a lock is already held. In order to avoid this, call
2605                 unlock() or check whether the "locked" attribute is True
2606                 or False before calling lock().
2607                 """
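                     # Illustrative usage, based on the callers elsewhere in
                     # this file (not a prescribed API):
                     #
                     #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                     #     build_dir.lock()
                     #     try:
                     #         ...  # work inside PORTAGE_BUILDDIR
                     #     finally:
                     #         build_dir.unlock()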
2608                 if self._lock_obj is not None:
2609                         raise self.AlreadyLocked((self._lock_obj,))
2610
2611                 dir_path = self.dir_path
2612                 if dir_path is None:
2613                         root_config = self.pkg.root_config
2614                         portdb = root_config.trees["porttree"].dbapi
2615                         ebuild_path = portdb.findname(self.pkg.cpv)
2616                         settings = self.settings
2617                         settings.setcpv(self.pkg)
2618                         debug = settings.get("PORTAGE_DEBUG") == "1"
2619                         use_cache = 1 # always true
2620                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2621                                 self.settings, debug, use_cache, portdb)
2622                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2623
2624                 catdir = os.path.dirname(dir_path)
2625                 self._catdir = catdir
2626
2627                 portage.util.ensure_dirs(os.path.dirname(catdir),
2628                         gid=portage.portage_gid,
2629                         mode=070, mask=0)
2630                 catdir_lock = None
2631                 try:
2632                         catdir_lock = portage.locks.lockdir(catdir)
2633                         portage.util.ensure_dirs(catdir,
2634                                 gid=portage.portage_gid,
2635                                 mode=070, mask=0)
2636                         self._lock_obj = portage.locks.lockdir(dir_path)
2637                 finally:
2638                         self.locked = self._lock_obj is not None
2639                         if catdir_lock is not None:
2640                                 portage.locks.unlockdir(catdir_lock)
2641
2642         def clean_log(self):
2643                 """Discard existing log."""
2644                 settings = self.settings
2645
2646                 for x in ('.logid', 'temp/build.log'):
2647                         try:
2648                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2649                         except OSError:
2650                                 pass
2651
2652         def unlock(self):
2653                 if self._lock_obj is None:
2654                         return
2655
2656                 portage.locks.unlockdir(self._lock_obj)
2657                 self._lock_obj = None
2658                 self.locked = False
2659
2660                 catdir = self._catdir
2661                 catdir_lock = None
2662                 try:
2663                         catdir_lock = portage.locks.lockdir(catdir)
2664                 finally:
2665                         if catdir_lock:
2666                                 try:
2667                                         os.rmdir(catdir)
2668                                 except OSError, e:
2669                                         if e.errno not in (errno.ENOENT,
2670                                                 errno.ENOTEMPTY, errno.EEXIST):
2671                                                 raise
2672                                         del e
2673                                 portage.locks.unlockdir(catdir_lock)
2674
2675         class AlreadyLocked(portage.exception.PortageException):
2676                 pass
2677
2678 class EbuildBuild(CompositeTask):
2679
2680         __slots__ = ("args_set", "config_pool", "find_blockers",
2681                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2682                 "prefetcher", "settings", "world_atom") + \
2683                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2684
2685         def _start(self):
2686
2687                 logger = self.logger
2688                 opts = self.opts
2689                 pkg = self.pkg
2690                 settings = self.settings
2691                 world_atom = self.world_atom
2692                 root_config = pkg.root_config
2693                 tree = "porttree"
2694                 self._tree = tree
2695                 portdb = root_config.trees[tree].dbapi
2696                 settings.setcpv(pkg)
2697                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2698                 ebuild_path = portdb.findname(self.pkg.cpv)
2699                 self._ebuild_path = ebuild_path
2700
2701                 prefetcher = self.prefetcher
2702                 if prefetcher is None:
2703                         pass
2704                 elif not prefetcher.isAlive():
2705                         prefetcher.cancel()
2706                 elif prefetcher.poll() is None:
2707
2708                         waiting_msg = "Fetching files " + \
2709                                 "in the background. " + \
2710                                 "To view fetch progress, run `tail -f " + \
2711                                 "/var/log/emerge-fetch.log` in another " + \
2712                                 "terminal."
2713                         msg_prefix = colorize("GOOD", " * ")
2714                         from textwrap import wrap
2715                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2716                                 for line in wrap(waiting_msg, 65))
2717                         if not self.background:
2718                                 writemsg(waiting_msg, noiselevel=-1)
2719
2720                         self._current_task = prefetcher
2721                         prefetcher.addExitListener(self._prefetch_exit)
2722                         return
2723
2724                 self._prefetch_exit(prefetcher)
2725
2726         def _prefetch_exit(self, prefetcher):
2727
2728                 opts = self.opts
2729                 pkg = self.pkg
2730                 settings = self.settings
2731
2732                 if opts.fetchonly:
2733                         fetcher = EbuildFetchonly(
2734                                 fetch_all=opts.fetch_all_uri,
2735                                 pkg=pkg, pretend=opts.pretend,
2736                                 settings=settings)
2737                         retval = fetcher.execute()
2738                         self.returncode = retval
2739                         self.wait()
2740                         return
2741
2742                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2743                         fetchall=opts.fetch_all_uri,
2744                         fetchonly=opts.fetchonly,
2745                         background=self.background,
2746                         pkg=pkg, scheduler=self.scheduler)
2747
2748                 self._start_task(fetcher, self._fetch_exit)
2749
2750         def _fetch_exit(self, fetcher):
2751                 opts = self.opts
2752                 pkg = self.pkg
2753
2754                 fetch_failed = False
2755                 if opts.fetchonly:
2756                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2757                 else:
2758                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2759
2760                 if fetch_failed and fetcher.logfile is not None and \
2761                         os.path.exists(fetcher.logfile):
2762                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2763
2764                 if not fetch_failed and fetcher.logfile is not None:
2765                         # Fetch was successful, so remove the fetch log.
2766                         try:
2767                                 os.unlink(fetcher.logfile)
2768                         except OSError:
2769                                 pass
2770
2771                 if fetch_failed or opts.fetchonly:
2772                         self.wait()
2773                         return
2774
2775                 logger = self.logger
2776                 opts = self.opts
2777                 pkg_count = self.pkg_count
2778                 scheduler = self.scheduler
2779                 settings = self.settings
2780                 features = settings.features
2781                 ebuild_path = self._ebuild_path
2782                 system_set = pkg.root_config.sets["system"]
2783
2784                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2785                 self._build_dir.lock()
2786
2787                 # Cleaning is triggered before the setup
2788                 # phase, in portage.doebuild().
2789                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2790                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2791                 short_msg = "emerge: (%s of %s) %s Clean" % \
2792                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2793                 logger.log(msg, short_msg=short_msg)
2794
2795                 #buildsyspkg: Check if we need to _force_ binary package creation
2796                 self._issyspkg = "buildsyspkg" in features and \
2797                                 system_set.findAtomForPackage(pkg) and \
2798                                 not opts.buildpkg
2799
2800                 if opts.buildpkg or self._issyspkg:
2801
2802                         self._buildpkg = True
2803
2804                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2805                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2806                         short_msg = "emerge: (%s of %s) %s Compile" % \
2807                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2808                         logger.log(msg, short_msg=short_msg)
2809
2810                 else:
2811                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2812                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2813                         short_msg = "emerge: (%s of %s) %s Compile" % \
2814                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2815                         logger.log(msg, short_msg=short_msg)
2816
2817                 build = EbuildExecuter(background=self.background, pkg=pkg,
2818                         scheduler=scheduler, settings=settings)
2819                 self._start_task(build, self._build_exit)
2820
2821         def _unlock_builddir(self):
2822                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2823                 self._build_dir.unlock()
2824
2825         def _build_exit(self, build):
2826                 if self._default_exit(build) != os.EX_OK:
2827                         self._unlock_builddir()
2828                         self.wait()
2829                         return
2830
2831                 opts = self.opts
2832                 buildpkg = self._buildpkg
2833
2834                 if not buildpkg:
2835                         self._final_exit(build)
2836                         self.wait()
2837                         return
2838
2839                 if self._issyspkg:
2840                         msg = ">>> This is a system package, " + \
2841                                 "let's pack a rescue tarball.\n"
2842
2843                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2844                         if log_path is not None:
2845                                 log_file = open(log_path, 'a')
2846                                 try:
2847                                         log_file.write(msg)
2848                                 finally:
2849                                         log_file.close()
2850
2851                         if not self.background:
2852                                 portage.writemsg_stdout(msg, noiselevel=-1)
2853
2854                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2855                         scheduler=self.scheduler, settings=self.settings)
2856
2857                 self._start_task(packager, self._buildpkg_exit)
2858
2859         def _buildpkg_exit(self, packager):
2860                 """
2861                 Release the build dir lock when there is a failure or
2862                 when in buildpkgonly mode. Otherwise, the lock will
2863                 be released when merge() is called.
2864                 """
2865
2866                 if self._default_exit(packager) != os.EX_OK:
2867                         self._unlock_builddir()
2868                         self.wait()
2869                         return
2870
2871                 if self.opts.buildpkgonly:
2872                         # Need to call "clean" phase for buildpkgonly mode
2873                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2874                         phase = "clean"
2875                         clean_phase = EbuildPhase(background=self.background,
2876                                 pkg=self.pkg, phase=phase,
2877                                 scheduler=self.scheduler, settings=self.settings,
2878                                 tree=self._tree)
2879                         self._start_task(clean_phase, self._clean_exit)
2880                         return
2881
2882                 # Continue holding the builddir lock until
2883                 # after the package has been installed.
2884                 self._current_task = None
2885                 self.returncode = packager.returncode
2886                 self.wait()
2887
2888         def _clean_exit(self, clean_phase):
2889                 if self._final_exit(clean_phase) != os.EX_OK or \
2890                         self.opts.buildpkgonly:
2891                         self._unlock_builddir()
2892                 self.wait()
2893
2894         def install(self):
2895                 """
2896                 Install the package and then clean up and release locks.
2897                 Only call this after the build has completed successfully
2898                 and neither fetchonly nor buildpkgonly mode is enabled.
2899                 """
2900
2901                 find_blockers = self.find_blockers
2902                 ldpath_mtimes = self.ldpath_mtimes
2903                 logger = self.logger
2904                 pkg = self.pkg
2905                 pkg_count = self.pkg_count
2906                 settings = self.settings
2907                 world_atom = self.world_atom
2908                 ebuild_path = self._ebuild_path
2909                 tree = self._tree
2910
2911                 merge = EbuildMerge(find_blockers=self.find_blockers,
2912                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2913                         pkg_count=pkg_count, pkg_path=ebuild_path,
2914                         scheduler=self.scheduler,
2915                         settings=settings, tree=tree, world_atom=world_atom)
2916
2917                 msg = " === (%s of %s) Merging (%s::%s)" % \
2918                         (pkg_count.curval, pkg_count.maxval,
2919                         pkg.cpv, ebuild_path)
2920                 short_msg = "emerge: (%s of %s) %s Merge" % \
2921                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2922                 logger.log(msg, short_msg=short_msg)
2923
2924                 try:
2925                         rval = merge.execute()
2926                 finally:
2927                         self._unlock_builddir()
2928
2929                 return rval
2930
2931 class EbuildExecuter(CompositeTask):
2932
2933         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2934
2935         _phases = ("prepare", "configure", "compile", "test", "install")
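             # Source phases run in this order after the initial clean, setup
             # and unpack steps; "prepare" and "configure" are skipped for
             # EAPI 0 and 1 (see _unpack_exit below).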
2936
2937         _live_eclasses = frozenset([
2938                 "bzr",
2939                 "cvs",
2940                 "darcs",
2941                 "git",
2942                 "mercurial",
2943                 "subversion"
2944         ])
2945
2946         def _start(self):
2947                 self._tree = "porttree"
2948                 pkg = self.pkg
2949                 phase = "clean"
2950                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2951                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2952                 self._start_task(clean_phase, self._clean_phase_exit)
2953
2954         def _clean_phase_exit(self, clean_phase):
2955
2956                 if self._default_exit(clean_phase) != os.EX_OK:
2957                         self.wait()
2958                         return
2959
2960                 pkg = self.pkg
2961                 scheduler = self.scheduler
2962                 settings = self.settings
2963                 cleanup = 1
2964
2965                 # This initializes PORTAGE_LOG_FILE.
2966                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2967
2968                 setup_phase = EbuildPhase(background=self.background,
2969                         pkg=pkg, phase="setup", scheduler=scheduler,
2970                         settings=settings, tree=self._tree)
2971
2972                 setup_phase.addExitListener(self._setup_exit)
2973                 self._current_task = setup_phase
2974                 self.scheduler.scheduleSetup(setup_phase)
2975
2976         def _setup_exit(self, setup_phase):
2977
2978                 if self._default_exit(setup_phase) != os.EX_OK:
2979                         self.wait()
2980                         return
2981
2982                 unpack_phase = EbuildPhase(background=self.background,
2983                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2984                         settings=self.settings, tree=self._tree)
2985
2986                 if self._live_eclasses.intersection(self.pkg.inherited):
2987                         # Serialize $DISTDIR access for live ebuilds since
2988                         # otherwise they can interfere with each other.
2989
2990                         unpack_phase.addExitListener(self._unpack_exit)
2991                         self._current_task = unpack_phase
2992                         self.scheduler.scheduleUnpack(unpack_phase)
2993
2994                 else:
2995                         self._start_task(unpack_phase, self._unpack_exit)
2996
2997         def _unpack_exit(self, unpack_phase):
2998
2999                 if self._default_exit(unpack_phase) != os.EX_OK:
3000                         self.wait()
3001                         return
3002
3003                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3004
3005                 pkg = self.pkg
3006                 phases = self._phases
3007                 eapi = pkg.metadata["EAPI"]
3008                 if eapi in ("0", "1"):
3009                         # skip src_prepare and src_configure
3010                         phases = phases[2:]
3011
3012                 for phase in phases:
3013                         ebuild_phases.add(EbuildPhase(background=self.background,
3014                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3015                                 settings=self.settings, tree=self._tree))
3016
3017                 self._start_task(ebuild_phases, self._default_final_exit)
3018
3019 class EbuildMetadataPhase(SubProcess):
3020
3021         """
3022         Asynchronous interface for the ebuild "depend" phase which is
3023         used to extract metadata from the ebuild.
3024         """
3025
3026         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3027                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3028                 ("_raw_metadata",)
3029
3030         _file_names = ("ebuild",)
3031         _files_dict = slot_dict_class(_file_names, prefix="")
3032         _metadata_fd = 9
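             # The "depend" phase writes one line per metadata key, in
             # portage.auxdbkeys order, to file descriptor 9. _output_handler
             # collects that output and _set_returncode pairs the lines back
             # up with the keys before calling metadata_callback.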
3033
3034         def _start(self):
3035                 settings = self.settings
3036                 settings.setcpv(self.cpv)
3037                 ebuild_path = self.ebuild_path
3038
3039                 eapi = None
3040                 if 'parse-eapi-glep-55' in settings.features:
3041                         pf, eapi = portage._split_ebuild_name_glep55(
3042                                 os.path.basename(ebuild_path))
3043                 if eapi is None and \
3044                         'parse-eapi-ebuild-head' in settings.features:
3045                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3046                                 mode='r', encoding='utf_8', errors='replace'))
3047
3048                 if eapi is not None:
3049                         if not portage.eapi_is_supported(eapi):
3050                                 self.metadata_callback(self.cpv, self.ebuild_path,
3051                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3052                                 self.returncode = os.EX_OK
3053                                 self.wait()
3054                                 return
3055
3056                         settings.configdict['pkg']['EAPI'] = eapi
3057
3058                 debug = settings.get("PORTAGE_DEBUG") == "1"
3059                 master_fd = None
3060                 slave_fd = None
3061                 fd_pipes = None
3062                 if self.fd_pipes is not None:
3063                         fd_pipes = self.fd_pipes.copy()
3064                 else:
3065                         fd_pipes = {}
3066
3067                 fd_pipes.setdefault(0, sys.stdin.fileno())
3068                 fd_pipes.setdefault(1, sys.stdout.fileno())
3069                 fd_pipes.setdefault(2, sys.stderr.fileno())
3070
3071                 # flush any pending output
3072                 for fd in fd_pipes.itervalues():
3073                         if fd == sys.stdout.fileno():
3074                                 sys.stdout.flush()
3075                         if fd == sys.stderr.fileno():
3076                                 sys.stderr.flush()
3077
3078                 fd_pipes_orig = fd_pipes.copy()
3079                 self._files = self._files_dict()
3080                 files = self._files
3081
3082                 master_fd, slave_fd = os.pipe()
3083                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3084                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3085
3086                 fd_pipes[self._metadata_fd] = slave_fd
3087
3088                 self._raw_metadata = []
3089                 files.ebuild = os.fdopen(master_fd, 'r')
3090                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3091                         self._registered_events, self._output_handler)
3092                 self._registered = True
3093
3094                 retval = portage.doebuild(ebuild_path, "depend",
3095                         settings["ROOT"], settings, debug,
3096                         mydbapi=self.portdb, tree="porttree",
3097                         fd_pipes=fd_pipes, returnpid=True)
3098
3099                 os.close(slave_fd)
3100
3101                 if isinstance(retval, int):
3102                         # doebuild failed before spawning
3103                         self._unregister()
3104                         self.returncode = retval
3105                         self.wait()
3106                         return
3107
3108                 self.pid = retval[0]
3109                 portage.process.spawned_pids.remove(self.pid)
3110
3111         def _output_handler(self, fd, event):
3112
3113                 if event & PollConstants.POLLIN:
3114                         self._raw_metadata.append(self._files.ebuild.read())
3115                         if not self._raw_metadata[-1]:
3116                                 self._unregister()
3117                                 self.wait()
3118
3119                 self._unregister_if_appropriate(event)
3120                 return self._registered
3121
3122         def _set_returncode(self, wait_retval):
3123                 SubProcess._set_returncode(self, wait_retval)
3124                 if self.returncode == os.EX_OK:
3125                         metadata_lines = "".join(self._raw_metadata).splitlines()
3126                         if len(portage.auxdbkeys) != len(metadata_lines):
3127                                 # Don't trust bash's returncode if the
3128                                 # number of lines is incorrect.
3129                                 self.returncode = 1
3130                         else:
3131                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3132                                 self.metadata_callback(self.cpv, self.ebuild_path,
3133                                         self.repo_path, metadata, self.ebuild_mtime)
3134
3135 class EbuildProcess(SpawnProcess):
3136
3137         __slots__ = ("phase", "pkg", "settings", "tree")
3138
3139         def _start(self):
3140                 # Don't open the log file during the clean phase since the
3141                 # open file can result in an nfs lock on $T/build.log which
3142                 # prevents the clean phase from removing $T.
3143                 if self.phase not in ("clean", "cleanrm"):
3144                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3145                 SpawnProcess._start(self)
3146
3147         def _pipe(self, fd_pipes):
3148                 stdout_pipe = fd_pipes.get(1)
3149                 got_pty, master_fd, slave_fd = \
3150                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3151                 return (master_fd, slave_fd)
3152
3153         def _spawn(self, args, **kwargs):
3154
3155                 root_config = self.pkg.root_config
3156                 tree = self.tree
3157                 mydbapi = root_config.trees[tree].dbapi
3158                 settings = self.settings
3159                 ebuild_path = settings["EBUILD"]
3160                 debug = settings.get("PORTAGE_DEBUG") == "1"
3161
3162                 rval = portage.doebuild(ebuild_path, self.phase,
3163                         root_config.root, settings, debug,
3164                         mydbapi=mydbapi, tree=tree, **kwargs)
3165
3166                 return rval
3167
3168         def _set_returncode(self, wait_retval):
3169                 SpawnProcess._set_returncode(self, wait_retval)
3170
3171                 if self.phase not in ("clean", "cleanrm"):
3172                         self.returncode = portage._doebuild_exit_status_check_and_log(
3173                                 self.settings, self.phase, self.returncode)
3174
3175                 if self.phase == "test" and self.returncode != os.EX_OK and \
3176                         "test-fail-continue" in self.settings.features:
3177                         self.returncode = os.EX_OK
3178
3179                 portage._post_phase_userpriv_perms(self.settings)
3180
3181 class EbuildPhase(CompositeTask):
3182
3183         __slots__ = ("background", "pkg", "phase",
3184                 "scheduler", "settings", "tree")
3185
3186         _post_phase_cmds = portage._post_phase_cmds
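             # After the ebuild process for a phase exits successfully, any
             # post-phase commands registered for that phase are run via
             # MiscFunctionsProcess (see _ebuild_exit).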
3187
3188         def _start(self):
3189
3190                 ebuild_process = EbuildProcess(background=self.background,
3191                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3192                         settings=self.settings, tree=self.tree)
3193
3194                 self._start_task(ebuild_process, self._ebuild_exit)
3195
3196         def _ebuild_exit(self, ebuild_process):
3197
3198                 if self.phase == "install":
3199                         out = None
3200                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3201                         log_file = None
3202                         if self.background and log_path is not None:
3203                                 log_file = open(log_path, 'a')
3204                                 out = log_file
3205                         try:
3206                                 portage._check_build_log(self.settings, out=out)
3207                         finally:
3208                                 if log_file is not None:
3209                                         log_file.close()
3210
3211                 if self._default_exit(ebuild_process) != os.EX_OK:
3212                         self.wait()
3213                         return
3214
3215                 settings = self.settings
3216
3217                 if self.phase == "install":
3218                         portage._post_src_install_chost_fix(settings)
3219                         portage._post_src_install_uid_fix(settings)
3220
3221                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3222                 if post_phase_cmds is not None:
3223                         post_phase = MiscFunctionsProcess(background=self.background,
3224                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3225                                 scheduler=self.scheduler, settings=settings)
3226                         self._start_task(post_phase, self._post_phase_exit)
3227                         return
3228
3229                 self.returncode = ebuild_process.returncode
3230                 self._current_task = None
3231                 self.wait()
3232
3233         def _post_phase_exit(self, post_phase):
3234                 if self._final_exit(post_phase) != os.EX_OK:
3235                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3236                                 noiselevel=-1)
3237                 self._current_task = None
3238                 self.wait()
3239                 return
3240
3241 class EbuildBinpkg(EbuildProcess):
3242         """
3243         This assumes that src_install() has successfully completed.
3244         """
3245         __slots__ = ("_binpkg_tmpfile",)
3246
3247         def _start(self):
3248                 self.phase = "package"
3249                 self.tree = "porttree"
3250                 pkg = self.pkg
3251                 root_config = pkg.root_config
3252                 portdb = root_config.trees["porttree"].dbapi
3253                 bintree = root_config.trees["bintree"]
3254                 ebuild_path = portdb.findname(self.pkg.cpv)
3255                 settings = self.settings
3256                 debug = settings.get("PORTAGE_DEBUG") == "1"
3257
3258                 bintree.prevent_collision(pkg.cpv)
3259                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3260                         pkg.cpv + ".tbz2." + str(os.getpid()))
3261                 self._binpkg_tmpfile = binpkg_tmpfile
3262                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3263                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3264
3265                 try:
3266                         EbuildProcess._start(self)
3267                 finally:
3268                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3269
3270         def _set_returncode(self, wait_retval):
3271                 EbuildProcess._set_returncode(self, wait_retval)
3272
3273                 pkg = self.pkg
3274                 bintree = pkg.root_config.trees["bintree"]
3275                 binpkg_tmpfile = self._binpkg_tmpfile
3276                 if self.returncode == os.EX_OK:
3277                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3278
3279 class EbuildMerge(SlotObject):
3280
3281         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3282                 "pkg", "pkg_count", "pkg_path", "pretend",
3283                 "scheduler", "settings", "tree", "world_atom")
3284
3285         def execute(self):
3286                 root_config = self.pkg.root_config
3287                 settings = self.settings
3288                 retval = portage.merge(settings["CATEGORY"],
3289                         settings["PF"], settings["D"],
3290                         os.path.join(settings["PORTAGE_BUILDDIR"],
3291                         "build-info"), root_config.root, settings,
3292                         myebuild=settings["EBUILD"],
3293                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3294                         vartree=root_config.trees["vartree"],
3295                         prev_mtimes=self.ldpath_mtimes,
3296                         scheduler=self.scheduler,
3297                         blockers=self.find_blockers)
3298
3299                 if retval == os.EX_OK:
3300                         self.world_atom(self.pkg)
3301                         self._log_success()
3302
3303                 return retval
3304
3305         def _log_success(self):
3306                 pkg = self.pkg
3307                 pkg_count = self.pkg_count
3308                 pkg_path = self.pkg_path
3309                 logger = self.logger
3310                 if "noclean" not in self.settings.features:
3311                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3312                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3313                         logger.log((" === (%s of %s) " + \
3314                                 "Post-Build Cleaning (%s::%s)") % \
3315                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3316                                 short_msg=short_msg)
3317                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3318                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3319
3320 class PackageUninstall(AsynchronousTask):
3321
3322         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3323
3324         def _start(self):
3325                 try:
3326                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3327                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3328                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3329                                 writemsg_level=self._writemsg_level)
3330                 except UninstallFailure, e:
3331                         self.returncode = e.status
3332                 else:
3333                         self.returncode = os.EX_OK
3334                 self.wait()
3335
3336         def _writemsg_level(self, msg, level=0, noiselevel=0):
3337
3338                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339                 background = self.background
3340
3341                 if log_path is None:
3342                         if not (background and level < logging.WARNING):
3343                                 portage.util.writemsg_level(msg,
3344                                         level=level, noiselevel=noiselevel)
3345                 else:
3346                         if not background:
3347                                 portage.util.writemsg_level(msg,
3348                                         level=level, noiselevel=noiselevel)
3349
3350                         f = open(log_path, 'a')
3351                         try:
3352                                 f.write(msg)
3353                         finally:
3354                                 f.close()
3355
3356 class Binpkg(CompositeTask):
3357
3358         __slots__ = ("find_blockers",
3359                 "ldpath_mtimes", "logger", "opts",
3360                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3361                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3362                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
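             # Fetches the binary package if necessary, optionally verifies it,
             # extracts it into an image directory, and finally merges it to
             # the live filesystem when install() is called.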
3363
3364         def _writemsg_level(self, msg, level=0, noiselevel=0):
3365
3366                 if not self.background:
3367                         portage.util.writemsg_level(msg,
3368                                 level=level, noiselevel=noiselevel)
3369
3370                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3371                 if log_path is not None:
3372                         f = open(log_path, 'a')
3373                         try:
3374                                 f.write(msg)
3375                         finally:
3376                                 f.close()
3377
3378         def _start(self):
3379
3380                 pkg = self.pkg
3381                 settings = self.settings
3382                 settings.setcpv(pkg)
3383                 self._tree = "bintree"
3384                 self._bintree = self.pkg.root_config.trees[self._tree]
3385                 self._verify = not self.opts.pretend
3386
3387                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3388                         "portage", pkg.category, pkg.pf)
3389                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3390                         pkg=pkg, settings=settings)
3391                 self._image_dir = os.path.join(dir_path, "image")
3392                 self._infloc = os.path.join(dir_path, "build-info")
3393                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3394                 settings["EBUILD"] = self._ebuild_path
3395                 debug = settings.get("PORTAGE_DEBUG") == "1"
3396                 portage.doebuild_environment(self._ebuild_path, "setup",
3397                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3398                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3399
3400                 # The prefetcher has already completed or it
3401                 # could be running now. If it's running now,
3402                 # wait for it to complete since it holds
3403                 # a lock on the file being fetched. The
3404                 # portage.locks functions are only designed
3405                 # to work between separate processes. Since
3406                 # the lock is held by the current process,
3407                 # use the scheduler and fetcher methods to
3408                 # synchronize with the fetcher.
3409                 prefetcher = self.prefetcher
3410                 if prefetcher is None:
3411                         pass
3412                 elif not prefetcher.isAlive():
3413                         prefetcher.cancel()
3414                 elif prefetcher.poll() is None:
3415
3416                         waiting_msg = ("Fetching '%s' " + \
3417                                 "in the background. " + \
3418                                 "To view fetch progress, run `tail -f " + \
3419                                 "/var/log/emerge-fetch.log` in another " + \
3420                                 "terminal.") % prefetcher.pkg_path
3421                         msg_prefix = colorize("GOOD", " * ")
3422                         from textwrap import wrap
3423                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3424                                 for line in wrap(waiting_msg, 65))
3425                         if not self.background:
3426                                 writemsg(waiting_msg, noiselevel=-1)
3427
3428                         self._current_task = prefetcher
3429                         prefetcher.addExitListener(self._prefetch_exit)
3430                         return
3431
3432                 self._prefetch_exit(prefetcher)
3433
3434         def _prefetch_exit(self, prefetcher):
3435
3436                 pkg = self.pkg
3437                 pkg_count = self.pkg_count
3438                 if not (self.opts.pretend or self.opts.fetchonly):
3439                         self._build_dir.lock()
3440                         # If necessary, discard old log so that we don't
3441                         # append to it.
3442                         self._build_dir.clean_log()
3443                         # Initialize PORTAGE_LOG_FILE.
3444                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3445                 fetcher = BinpkgFetcher(background=self.background,
3446                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3447                         pretend=self.opts.pretend, scheduler=self.scheduler)
3448                 pkg_path = fetcher.pkg_path
3449                 self._pkg_path = pkg_path
3450
3451                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3452
3453                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3454                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3455                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3456                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3457                         self.logger.log(msg, short_msg=short_msg)
3458                         self._start_task(fetcher, self._fetcher_exit)
3459                         return
3460
3461                 self._fetcher_exit(fetcher)
3462
3463         def _fetcher_exit(self, fetcher):
3464
3465                 # The fetcher only has a returncode when
3466                 # --getbinpkg is enabled.
3467                 if fetcher.returncode is not None:
3468                         self._fetched_pkg = True
3469                         if self._default_exit(fetcher) != os.EX_OK:
3470                                 self._unlock_builddir()
3471                                 self.wait()
3472                                 return
3473
3474                 if self.opts.pretend:
3475                         self._current_task = None
3476                         self.returncode = os.EX_OK
3477                         self.wait()
3478                         return
3479
3480                 verifier = None
3481                 if self._verify:
3482                         logfile = None
3483                         if self.background:
3484                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3485                         verifier = BinpkgVerifier(background=self.background,
3486                                 logfile=logfile, pkg=self.pkg)
3487                         self._start_task(verifier, self._verifier_exit)
3488                         return
3489
3490                 self._verifier_exit(verifier)
3491
3492         def _verifier_exit(self, verifier):
3493                 if verifier is not None and \
3494                         self._default_exit(verifier) != os.EX_OK:
3495                         self._unlock_builddir()
3496                         self.wait()
3497                         return
3498
3499                 logger = self.logger
3500                 pkg = self.pkg
3501                 pkg_count = self.pkg_count
3502                 pkg_path = self._pkg_path
3503
3504                 if self._fetched_pkg:
3505                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3506
3507                 if self.opts.fetchonly:
3508                         self._current_task = None
3509                         self.returncode = os.EX_OK
3510                         self.wait()
3511                         return
3512
3513                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3514                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3515                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3516                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3517                 logger.log(msg, short_msg=short_msg)
3518
3519                 phase = "clean"
3520                 settings = self.settings
3521                 ebuild_phase = EbuildPhase(background=self.background,
3522                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3523                         settings=settings, tree=self._tree)
3524
3525                 self._start_task(ebuild_phase, self._clean_exit)
3526
3527         def _clean_exit(self, clean_phase):
3528                 if self._default_exit(clean_phase) != os.EX_OK:
3529                         self._unlock_builddir()
3530                         self.wait()
3531                         return
3532
3533                 dir_path = self._build_dir.dir_path
3534
3535                 infloc = self._infloc
3536                 pkg = self.pkg
3537                 pkg_path = self._pkg_path
3538
3539                 dir_mode = 0755
3540                 for mydir in (dir_path, self._image_dir, infloc):
3541                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3542                                 gid=portage.data.portage_gid, mode=dir_mode)
3543
3544                 # This initializes PORTAGE_LOG_FILE.
3545                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3546                 self._writemsg_level(">>> Extracting info\n")
3547
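                     # Recover CATEGORY and PF from the binary package's xpak
                     # metadata; if either is missing, fall back to the values
                     # from the Package object (pkg.category / pkg.pf).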
3548                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3549                 check_missing_metadata = ("CATEGORY", "PF")
3550                 missing_metadata = set()
3551                 for k in check_missing_metadata:
3552                         v = pkg_xpak.getfile(k)
3553                         if not v:
3554                                 missing_metadata.add(k)
3555
3556                 pkg_xpak.unpackinfo(infloc)
3557                 for k in missing_metadata:
3558                         if k == "CATEGORY":
3559                                 v = pkg.category
3560                         elif k == "PF":
3561                                 v = pkg.pf
3562                         else:
3563                                 continue
3564
3565                         f = open(os.path.join(infloc, k), 'wb')
3566                         try:
3567                                 f.write(v + "\n")
3568                         finally:
3569                                 f.close()
3570
3571                 # Store the md5sum in the vdb.
3572                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3573                 try:
3574                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3575                 finally:
3576                         f.close()
3577
3578                 # This gives bashrc users an opportunity to do various things
3579                 # such as remove binary packages after they're installed.
3580                 settings = self.settings
3581                 settings.setcpv(self.pkg)
3582                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3583                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3584
3585                 phase = "setup"
3586                 setup_phase = EbuildPhase(background=self.background,
3587                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3588                         settings=settings, tree=self._tree)
3589
3590                 setup_phase.addExitListener(self._setup_exit)
3591                 self._current_task = setup_phase
3592                 self.scheduler.scheduleSetup(setup_phase)
3593
3594         def _setup_exit(self, setup_phase):
3595                 if self._default_exit(setup_phase) != os.EX_OK:
3596                         self._unlock_builddir()
3597                         self.wait()
3598                         return
3599
3600                 extractor = BinpkgExtractorAsync(background=self.background,
3601                         image_dir=self._image_dir,
3602                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3603                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3604                 self._start_task(extractor, self._extractor_exit)
3605
3606         def _extractor_exit(self, extractor):
3607                 if self._final_exit(extractor) != os.EX_OK:
3608                         self._unlock_builddir()
3609                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3610                                 noiselevel=-1)
3611                 self.wait()
3612
3613         def _unlock_builddir(self):
3614                 if self.opts.pretend or self.opts.fetchonly:
3615                         return
3616                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3617                 self._build_dir.unlock()
3618
3619         def install(self):
3620
3621                 # This gives bashrc users an opportunity to do various things
3622                 # such as remove binary packages after they're installed.
3623                 settings = self.settings
3624                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3625                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3626
3627                 merge = EbuildMerge(find_blockers=self.find_blockers,
3628                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3629                         pkg=self.pkg, pkg_count=self.pkg_count,
3630                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3631                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3632
3633                 try:
3634                         retval = merge.execute()
3635                 finally:
3636                         settings.pop("PORTAGE_BINPKG_FILE", None)
3637                         self._unlock_builddir()
3638                 return retval
3639
3640 class BinpkgFetcher(SpawnProcess):
3641
3642         __slots__ = ("pkg", "pretend",
3643                 "locked", "pkg_path", "_lock_obj")
3644
3645         def __init__(self, **kwargs):
3646                 SpawnProcess.__init__(self, **kwargs)
3647                 pkg = self.pkg
3648                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3649
3650         def _start(self):
3651
3652                 if self.cancelled:
3653                         return
3654
3655                 pkg = self.pkg
3656                 pretend = self.pretend
3657                 bintree = pkg.root_config.trees["bintree"]
3658                 settings = bintree.settings
3659                 use_locks = "distlocks" in settings.features
3660                 pkg_path = self.pkg_path
3661
3662                 if not pretend:
3663                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3664                         if use_locks:
3665                                 self.lock()
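                # An existing file is only treated as a resume candidate when
                # bintree lists its basename in bintree.invalids; otherwise any
                # stale copy is removed below before fetching.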
3666                 exists = os.path.exists(pkg_path)
3667                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3668                 if not (pretend or resume):
3669                         # Remove existing file or broken symlink.
3670                         try:
3671                                 os.unlink(pkg_path)
3672                         except OSError:
3673                                 pass
3674
3675                 # urljoin doesn't work correctly with
3676                 # unrecognized protocols like sftp
3677                 if bintree._remote_has_index:
3678                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3679                         if not rel_uri:
3680                                 rel_uri = pkg.cpv + ".tbz2"
3681                         uri = bintree._remote_base_uri.rstrip("/") + \
3682                                 "/" + rel_uri.lstrip("/")
3683                 else:
3684                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3685                                 "/" + pkg.pf + ".tbz2"
3686
3687                 if pretend:
3688                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3689                         self.returncode = os.EX_OK
3690                         self.wait()
3691                         return
3692
3693                 protocol = urlparse.urlparse(uri)[0]
3694                 fcmd_prefix = "FETCHCOMMAND"
3695                 if resume:
3696                         fcmd_prefix = "RESUMECOMMAND"
3697                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3698                 if not fcmd:
3699                         fcmd = settings.get(fcmd_prefix)
3700
3701                 fcmd_vars = {
3702                         "DISTDIR" : os.path.dirname(pkg_path),
3703                         "URI"     : uri,
3704                         "FILE"    : os.path.basename(pkg_path)
3705                 }
3706
3707                 fetch_env = dict(settings.iteritems())
3708                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3709                         for x in shlex.split(fcmd)]
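                # Illustrative example (not from this file): with a typical
                # FETCHCOMMAND along the lines of
                #   wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"
                # shlex.split() breaks the command into argv tokens, and
                # portage.util.varexpand() then substitutes the DISTDIR, FILE
                # and URI values from fcmd_vars into each token.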
3710
3711                 if self.fd_pipes is None:
3712                         self.fd_pipes = {}
3713                 fd_pipes = self.fd_pipes
3714
3715                 # Redirect all output to stdout since some fetchers like
3716                 # wget pollute stderr (if portage detects a problem then it
3717                 # can send its own message to stderr).
3718                 fd_pipes.setdefault(0, sys.stdin.fileno())
3719                 fd_pipes.setdefault(1, sys.stdout.fileno())
3720                 fd_pipes.setdefault(2, sys.stdout.fileno())
3721
3722                 self.args = fetch_args
3723                 self.env = fetch_env
3724                 SpawnProcess._start(self)
3725
3726         def _set_returncode(self, wait_retval):
3727                 SpawnProcess._set_returncode(self, wait_retval)
3728                 if self.returncode == os.EX_OK:
3729                         # If possible, update the mtime to match the remote package if
3730                         # the fetcher didn't already do it automatically.
3731                         bintree = self.pkg.root_config.trees["bintree"]
3732                         if bintree._remote_has_index:
3733                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3734                                 if remote_mtime is not None:
3735                                         try:
3736                                                 remote_mtime = long(remote_mtime)
3737                                         except ValueError:
3738                                                 pass
3739                                         else:
3740                                                 try:
3741                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3742                                                 except OSError:
3743                                                         pass
3744                                                 else:
3745                                                         if remote_mtime != local_mtime:
3746                                                                 try:
3747                                                                         os.utime(self.pkg_path,
3748                                                                                 (remote_mtime, remote_mtime))
3749                                                                 except OSError:
3750                                                                         pass
3751
3752                 if self.locked:
3753                         self.unlock()
3754
3755         def lock(self):
3756                 """
3757                 This raises an AlreadyLocked exception if lock() is called
3758                 while a lock is already held. In order to avoid this, call
3759                 unlock() or check whether the "locked" attribute is True
3760                 or False before calling lock().
3761                 """
3762                 if self._lock_obj is not None:
3763                         raise self.AlreadyLocked((self._lock_obj,))
3764
3765                 self._lock_obj = portage.locks.lockfile(
3766                         self.pkg_path, wantnewlockfile=1)
3767                 self.locked = True
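
        # Illustrative usage (a sketch, not part of the original code):
        #
        #       if not fetcher.locked:
        #               fetcher.lock()
        #       try:
        #               ...  # perform the fetch
        #       finally:
        #               fetcher.unlock()
        #
        # Calling lock() again while a lock is already held raises
        # AlreadyLocked, as the docstring above notes.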
3768
3769         class AlreadyLocked(portage.exception.PortageException):
3770                 pass
3771
3772         def unlock(self):
3773                 if self._lock_obj is None:
3774                         return
3775                 portage.locks.unlockfile(self._lock_obj)
3776                 self._lock_obj = None
3777                 self.locked = False
3778
3779 class BinpkgVerifier(AsynchronousTask):
3780         __slots__ = ("logfile", "pkg",)
3781
3782         def _start(self):
3783                 """
3784                 Note: Unlike a normal AsynchronousTask.start() method,
3785                 this one does all of its work synchronously. The returncode
3786                 attribute will be set before it returns.
3787                 """
3788
3789                 pkg = self.pkg
3790                 root_config = pkg.root_config
3791                 bintree = root_config.trees["bintree"]
3792                 rval = os.EX_OK
3793                 stdout_orig = sys.stdout
3794                 stderr_orig = sys.stderr
3795                 log_file = None
3796                 if self.background and self.logfile is not None:
3797                         log_file = open(self.logfile, 'a')
3798                 try:
3799                         if log_file is not None:
3800                                 sys.stdout = log_file
3801                                 sys.stderr = log_file
3802                         try:
3803                                 bintree.digestCheck(pkg)
3804                         except portage.exception.FileNotFound:
3805                                 writemsg("!!! Fetching Binary failed " + \
3806                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3807                                 rval = 1
3808                         except portage.exception.DigestException, e:
3809                                 writemsg("\n!!! Digest verification failed:\n",
3810                                         noiselevel=-1)
3811                                 writemsg("!!! %s\n" % e.value[0],
3812                                         noiselevel=-1)
3813                                 writemsg("!!! Reason: %s\n" % e.value[1],
3814                                         noiselevel=-1)
3815                                 writemsg("!!! Got: %s\n" % e.value[2],
3816                                         noiselevel=-1)
3817                                 writemsg("!!! Expected: %s\n" % e.value[3],
3818                                         noiselevel=-1)
3819                                 rval = 1
3820                         if rval != os.EX_OK:
3821                                 pkg_path = bintree.getname(pkg.cpv)
3822                                 head, tail = os.path.split(pkg_path)
3823                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3824                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3825                                         noiselevel=-1)
3826                 finally:
3827                         sys.stdout = stdout_orig
3828                         sys.stderr = stderr_orig
3829                         if log_file is not None:
3830                                 log_file.close()
3831
3832                 self.returncode = rval
3833                 self.wait()
3834
3835 class BinpkgPrefetcher(CompositeTask):
3836
3837         __slots__ = ("pkg",) + \
3838                 ("pkg_path", "_bintree",)
3839
3840         def _start(self):
3841                 self._bintree = self.pkg.root_config.trees["bintree"]
3842                 fetcher = BinpkgFetcher(background=self.background,
3843                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3844                         scheduler=self.scheduler)
3845                 self.pkg_path = fetcher.pkg_path
3846                 self._start_task(fetcher, self._fetcher_exit)
3847
3848         def _fetcher_exit(self, fetcher):
3849
3850                 if self._default_exit(fetcher) != os.EX_OK:
3851                         self.wait()
3852                         return
3853
3854                 verifier = BinpkgVerifier(background=self.background,
3855                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3856                 self._start_task(verifier, self._verifier_exit)
3857
3858         def _verifier_exit(self, verifier):
3859                 if self._default_exit(verifier) != os.EX_OK:
3860                         self.wait()
3861                         return
3862
3863                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3864
3865                 self._current_task = None
3866                 self.returncode = os.EX_OK
3867                 self.wait()
3868
3869 class BinpkgExtractorAsync(SpawnProcess):
3870
3871         __slots__ = ("image_dir", "pkg", "pkg_path")
3872
3873         _shell_binary = portage.const.BASH_BINARY
3874
3875         def _start(self):
3876                 self.args = [self._shell_binary, "-c",
3877                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3878                         (portage._shell_quote(self.pkg_path),
3879                         portage._shell_quote(self.image_dir))]
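                # The shell pipeline above decompresses the .tbz2 binary
                # package with bzip2 and unpacks it into image_dir via
                # "tar -xp", preserving permissions; both paths are shell
                # quoted with portage._shell_quote().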
3880
3881                 self.env = self.pkg.root_config.settings.environ()
3882                 SpawnProcess._start(self)
3883
3884 class MergeListItem(CompositeTask):
3885
3886         """
3887         TODO: For parallel scheduling, everything here needs asynchronous
3888         execution support (start, poll, and wait methods).
3889         """
3890
3891         __slots__ = ("args_set",
3892                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3893                 "find_blockers", "logger", "mtimedb", "pkg",
3894                 "pkg_count", "pkg_to_replace", "prefetcher",
3895                 "settings", "statusMessage", "world_atom") + \
3896                 ("_install_task",)
3897
3898         def _start(self):
3899
3900                 pkg = self.pkg
3901                 build_opts = self.build_opts
3902
3903                 if pkg.installed:
3904                         # uninstall is executed by self.merge()
3905                         self.returncode = os.EX_OK
3906                         self.wait()
3907                         return
3908
3909                 args_set = self.args_set
3910                 find_blockers = self.find_blockers
3911                 logger = self.logger
3912                 mtimedb = self.mtimedb
3913                 pkg_count = self.pkg_count
3914                 scheduler = self.scheduler
3915                 settings = self.settings
3916                 world_atom = self.world_atom
3917                 ldpath_mtimes = mtimedb["ldpath"]
3918
3919                 action_desc = "Emerging"
3920                 preposition = "for"
3921                 if pkg.type_name == "binary":
3922                         action_desc += " binary"
3923
3924                 if build_opts.fetchonly:
3925                         action_desc = "Fetching"
3926
3927                 msg = "%s (%s of %s) %s" % \
3928                         (action_desc,
3929                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3930                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3931                         colorize("GOOD", pkg.cpv))
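                # For example (illustrative), this produces a status line such
                # as "Emerging binary (3 of 7) app-misc/foo-1.0", possibly
                # extended below with the repository name and target root.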
3932
3933                 portdb = pkg.root_config.trees["porttree"].dbapi
3934                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3935                 if portdir_repo_name:
3936                         pkg_repo_name = pkg.metadata.get("repository")
3937                         if pkg_repo_name != portdir_repo_name:
3938                                 if not pkg_repo_name:
3939                                         pkg_repo_name = "unknown repo"
3940                                 msg += " from %s" % pkg_repo_name
3941
3942                 if pkg.root != "/":
3943                         msg += " %s %s" % (preposition, pkg.root)
3944
3945                 if not build_opts.pretend:
3946                         self.statusMessage(msg)
3947                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3948                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3949
3950                 if pkg.type_name == "ebuild":
3951
3952                         build = EbuildBuild(args_set=args_set,
3953                                 background=self.background,
3954                                 config_pool=self.config_pool,
3955                                 find_blockers=find_blockers,
3956                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3957                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3958                                 prefetcher=self.prefetcher, scheduler=scheduler,
3959                                 settings=settings, world_atom=world_atom)
3960
3961                         self._install_task = build
3962                         self._start_task(build, self._default_final_exit)
3963                         return
3964
3965                 elif pkg.type_name == "binary":
3966
3967                         binpkg = Binpkg(background=self.background,
3968                                 find_blockers=find_blockers,
3969                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3970                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3971                                 prefetcher=self.prefetcher, settings=settings,
3972                                 scheduler=scheduler, world_atom=world_atom)
3973
3974                         self._install_task = binpkg
3975                         self._start_task(binpkg, self._default_final_exit)
3976                         return
3977
3978         def _poll(self):
3979                 self._install_task.poll()
3980                 return self.returncode
3981
3982         def _wait(self):
3983                 self._install_task.wait()
3984                 return self.returncode
3985
3986         def merge(self):
3987
3988                 pkg = self.pkg
3989                 build_opts = self.build_opts
3990                 find_blockers = self.find_blockers
3991                 logger = self.logger
3992                 mtimedb = self.mtimedb
3993                 pkg_count = self.pkg_count
3994                 prefetcher = self.prefetcher
3995                 scheduler = self.scheduler
3996                 settings = self.settings
3997                 world_atom = self.world_atom
3998                 ldpath_mtimes = mtimedb["ldpath"]
3999
4000                 if pkg.installed:
4001                         if not (build_opts.buildpkgonly or \
4002                                 build_opts.fetchonly or build_opts.pretend):
4003
4004                                 uninstall = PackageUninstall(background=self.background,
4005                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4006                                         pkg=pkg, scheduler=scheduler, settings=settings)
4007
4008                                 uninstall.start()
4009                                 retval = uninstall.wait()
4010                                 if retval != os.EX_OK:
4011                                         return retval
4012                         return os.EX_OK
4013
4014                 if build_opts.fetchonly or \
4015                         build_opts.buildpkgonly:
4016                         return self.returncode
4017
4018                 retval = self._install_task.install()
4019                 return retval
4020
4021 class PackageMerge(AsynchronousTask):
4022         """
4023         TODO: Implement asynchronous merge so that the scheduler can
4024         run while a merge is executing.
4025         """
4026
4027         __slots__ = ("merge",)
4028
4029         def _start(self):
4030
4031                 pkg = self.merge.pkg
4032                 pkg_count = self.merge.pkg_count
4033
4034                 if pkg.installed:
4035                         action_desc = "Uninstalling"
4036                         preposition = "from"
4037                         counter_str = ""
4038                 else:
4039                         action_desc = "Installing"
4040                         preposition = "to"
4041                         counter_str = "(%s of %s) " % \
4042                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4043                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4044
4045                 msg = "%s %s%s" % \
4046                         (action_desc,
4047                         counter_str,
4048                         colorize("GOOD", pkg.cpv))
4049
4050                 if pkg.root != "/":
4051                         msg += " %s %s" % (preposition, pkg.root)
4052
4053                 if not self.merge.build_opts.fetchonly and \
4054                         not self.merge.build_opts.pretend and \
4055                         not self.merge.build_opts.buildpkgonly:
4056                         self.merge.statusMessage(msg)
4057
4058                 self.returncode = self.merge.merge()
4059                 self.wait()
4060
4061 class DependencyArg(object):
4062         def __init__(self, arg=None, root_config=None):
4063                 self.arg = arg
4064                 self.root_config = root_config
4065
4066         def __str__(self):
4067                 return str(self.arg)
4068
4069 class AtomArg(DependencyArg):
4070         def __init__(self, atom=None, **kwargs):
4071                 DependencyArg.__init__(self, **kwargs)
4072                 self.atom = atom
4073                 if not isinstance(self.atom, portage.dep.Atom):
4074                         self.atom = portage.dep.Atom(self.atom)
4075                 self.set = (self.atom, )
4076
4077 class PackageArg(DependencyArg):
4078         def __init__(self, package=None, **kwargs):
4079                 DependencyArg.__init__(self, **kwargs)
4080                 self.package = package
4081                 self.atom = portage.dep.Atom("=" + package.cpv)
4082                 self.set = (self.atom, )
4083
4084 class SetArg(DependencyArg):
4085         def __init__(self, set=None, **kwargs):
4086                 DependencyArg.__init__(self, **kwargs)
4087                 self.set = set
4088                 self.name = self.arg[len(SETPREFIX):]
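                # For example, assuming SETPREFIX is "@", an argument of
                # "@world" yields the set name "world".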
4089
4090 class Dependency(SlotObject):
4091         __slots__ = ("atom", "blocker", "depth",
4092                 "parent", "onlydeps", "priority", "root")
4093         def __init__(self, **kwargs):
4094                 SlotObject.__init__(self, **kwargs)
4095                 if self.priority is None:
4096                         self.priority = DepPriority()
4097                 if self.depth is None:
4098                         self.depth = 0
4099
4100 class BlockerCache(portage.cache.mappings.MutableMapping):
4101         """This caches blockers of installed packages so that dep_check does not
4102         have to be done for every single installed package on every invocation of
4103         emerge.  The cache is invalidated whenever it is detected that something
4104         has changed that might alter the results of dep_check() calls:
4105                 1) the set of installed packages (including COUNTER) has changed
4106                 2) the old-style virtuals have changed
4107         """
4108
4109         # Number of uncached packages to trigger cache update, since
4110         # it's wasteful to update it for every vdb change.
4111         _cache_threshold = 5
4112
4113         class BlockerData(object):
4114
4115                 __slots__ = ("__weakref__", "atoms", "counter")
4116
4117                 def __init__(self, counter, atoms):
4118                         self.counter = counter
4119                         self.atoms = atoms
4120
4121         def __init__(self, myroot, vardb):
4122                 self._vardb = vardb
4123                 self._virtuals = vardb.settings.getvirtuals()
4124                 self._cache_filename = os.path.join(myroot,
4125                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4126                 self._cache_version = "1"
4127                 self._cache_data = None
4128                 self._modified = set()
4129                 self._load()
4130
4131         def _load(self):
4132                 try:
4133                         f = open(self._cache_filename, mode='rb')
4134                         mypickle = pickle.Unpickler(f)
4135                         try:
4136                                 mypickle.find_global = None
4137                         except AttributeError:
4138                                 # TODO: If py3k, override Unpickler.find_class().
4139                                 pass
4140                         self._cache_data = mypickle.load()
4141                         f.close()
4142                         del f
4143                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4144                         if isinstance(e, pickle.UnpicklingError):
4145                                 writemsg("!!! Error loading '%s': %s\n" % \
4146                                         (self._cache_filename, str(e)), noiselevel=-1)
4147                         del e
4148
4149                 cache_valid = self._cache_data and \
4150                         isinstance(self._cache_data, dict) and \
4151                         self._cache_data.get("version") == self._cache_version and \
4152                         isinstance(self._cache_data.get("blockers"), dict)
4153                 if cache_valid:
4154                         # Validate all the atoms and counters so that
4155                         # corruption is detected as soon as possible.
4156                         invalid_items = set()
4157                         for k, v in self._cache_data["blockers"].iteritems():
4158                                 if not isinstance(k, basestring):
4159                                         invalid_items.add(k)
4160                                         continue
4161                                 try:
4162                                         if portage.catpkgsplit(k) is None:
4163                                                 invalid_items.add(k)
4164                                                 continue
4165                                 except portage.exception.InvalidData:
4166                                         invalid_items.add(k)
4167                                         continue
4168                                 if not isinstance(v, tuple) or \
4169                                         len(v) != 2:
4170                                         invalid_items.add(k)
4171                                         continue
4172                                 counter, atoms = v
4173                                 if not isinstance(counter, (int, long)):
4174                                         invalid_items.add(k)
4175                                         continue
4176                                 if not isinstance(atoms, (list, tuple)):
4177                                         invalid_items.add(k)
4178                                         continue
4179                                 invalid_atom = False
4180                                 for atom in atoms:
4181                                         if not isinstance(atom, basestring):
4182                                                 invalid_atom = True
4183                                                 break
4184                                         if atom[:1] != "!" or \
4185                                                 not portage.isvalidatom(
4186                                                 atom, allow_blockers=True):
4187                                                 invalid_atom = True
4188                                                 break
4189                                 if invalid_atom:
4190                                         invalid_items.add(k)
4191                                         continue
4192
4193                         for k in invalid_items:
4194                                 del self._cache_data["blockers"][k]
4195                         if not self._cache_data["blockers"]:
4196                                 cache_valid = False
4197
4198                 if not cache_valid:
4199                         self._cache_data = {"version":self._cache_version}
4200                         self._cache_data["blockers"] = {}
4201                         self._cache_data["virtuals"] = self._virtuals
4202                 self._modified.clear()
4203
4204         def flush(self):
4205                 """If the current user has permission and the internal blocker cache
4206                 has been updated, save it to disk and mark it unmodified.  This is called
4207                 by emerge after it has processed blockers for all installed packages.
4208                 Currently, the cache is only written if the user has superuser
4209                 privileges (since that's required to obtain a lock), but all users
4210                 have read access and benefit from faster blocker lookups (as long as
4211                 the entire cache is still valid).  The cache is stored as a pickled
4212                 dict object with the following format:
4213
4214                 {
4215                         "version" : "1",
4216                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4217                         "virtuals" : vardb.settings.getvirtuals()
4218                 }
4219                 """
4220                 if len(self._modified) >= self._cache_threshold and \
4221                         secpass >= 2:
4222                         try:
4223                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4224                                 pickle.dump(self._cache_data, f, protocol=2)
4225                                 f.close()
4226                                 portage.util.apply_secpass_permissions(
4227                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4228                         except (IOError, OSError), e:
4229                                 pass
4230                         self._modified.clear()
4231
4232         def __setitem__(self, cpv, blocker_data):
4233                 """
4234                 Update the cache and mark it as modified for a future call to
4235                 self.flush().
4236
4237                 @param cpv: Package for which to cache blockers.
4238                 @type cpv: String
4239                 @param blocker_data: An object with counter and atoms attributes.
4240                 @type blocker_data: BlockerData
4241                 """
4242                 self._cache_data["blockers"][cpv] = \
4243                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4244                 self._modified.add(cpv)
4245
4246         def __iter__(self):
4247                 if self._cache_data is None:
4248                         # triggered by python-trace
4249                         return iter([])
4250                 return iter(self._cache_data["blockers"])
4251
4252         def __delitem__(self, cpv):
4253                 del self._cache_data["blockers"][cpv]
4254
4255         def __getitem__(self, cpv):
4256                 """
4257                 @rtype: BlockerData
4258                 @returns: An object with counter and atoms attributes.
4259                 """
4260                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4261
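# Illustrative BlockerCache usage (a sketch mirroring what BlockerDB does
# below):
#
#       cache = BlockerCache(myroot, vardb)
#       data = cache.get(cpv)
#       if data is None or data.counter != current_counter:
#               cache[cpv] = cache.BlockerData(current_counter, blocker_atoms)
#       cache.flush()
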
4262 class BlockerDB(object):
4263
4264         def __init__(self, root_config):
4265                 self._root_config = root_config
4266                 self._vartree = root_config.trees["vartree"]
4267                 self._portdb = root_config.trees["porttree"].dbapi
4268
4269                 self._dep_check_trees = None
4270                 self._fake_vartree = None
4271
4272         def _get_fake_vartree(self, acquire_lock=0):
4273                 fake_vartree = self._fake_vartree
4274                 if fake_vartree is None:
4275                         fake_vartree = FakeVartree(self._root_config,
4276                                 acquire_lock=acquire_lock)
4277                         self._fake_vartree = fake_vartree
4278                         self._dep_check_trees = { self._vartree.root : {
4279                                 "porttree"    :  fake_vartree,
4280                                 "vartree"     :  fake_vartree,
4281                         }}
4282                 else:
4283                         fake_vartree.sync(acquire_lock=acquire_lock)
4284                 return fake_vartree
4285
4286         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4287                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4288                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4289                 settings = self._vartree.settings
4290                 stale_cache = set(blocker_cache)
4291                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4292                 dep_check_trees = self._dep_check_trees
4293                 vardb = fake_vartree.dbapi
4294                 installed_pkgs = list(vardb)
4295
4296                 for inst_pkg in installed_pkgs:
4297                         stale_cache.discard(inst_pkg.cpv)
4298                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4299                         if cached_blockers is not None and \
4300                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4301                                 cached_blockers = None
4302                         if cached_blockers is not None:
4303                                 blocker_atoms = cached_blockers.atoms
4304                         else:
4305                                 # Use aux_get() to trigger FakeVartree global
4306                                 # updates on *DEPEND when appropriate.
4307                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4308                                 try:
4309                                         portage.dep._dep_check_strict = False
4310                                         success, atoms = portage.dep_check(depstr,
4311                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4312                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4313                                 finally:
4314                                         portage.dep._dep_check_strict = True
4315                                 if not success:
4316                                         pkg_location = os.path.join(inst_pkg.root,
4317                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4318                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4319                                                 (pkg_location, atoms), noiselevel=-1)
4320                                         continue
4321
4322                                 blocker_atoms = [atom for atom in atoms \
4323                                         if atom.startswith("!")]
4324                                 blocker_atoms.sort()
4325                                 counter = long(inst_pkg.metadata["COUNTER"])
4326                                 blocker_cache[inst_pkg.cpv] = \
4327                                         blocker_cache.BlockerData(counter, blocker_atoms)
4328                 for cpv in stale_cache:
4329                         del blocker_cache[cpv]
4330                 blocker_cache.flush()
4331
4332                 blocker_parents = digraph()
4333                 blocker_atoms = []
4334                 for pkg in installed_pkgs:
4335                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4336                                 blocker_atom = blocker_atom.lstrip("!")
4337                                 blocker_atoms.append(blocker_atom)
4338                                 blocker_parents.add(blocker_atom, pkg)
4339
4340                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341                 blocking_pkgs = set()
4342                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4343                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4344
4345                 # Check for blockers in the other direction.
4346                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4347                 try:
4348                         portage.dep._dep_check_strict = False
4349                         success, atoms = portage.dep_check(depstr,
4350                                 vardb, settings, myuse=new_pkg.use.enabled,
4351                                 trees=dep_check_trees, myroot=new_pkg.root)
4352                 finally:
4353                         portage.dep._dep_check_strict = True
4354                 if not success:
4355                         # We should never get this far with invalid deps.
4356                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4357                         assert False
4358
4359                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4360                         if atom[:1] == "!"]
4361                 if blocker_atoms:
4362                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4363                         for inst_pkg in installed_pkgs:
4364                                 try:
4365                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4366                                 except (portage.exception.InvalidDependString, StopIteration):
4367                                         continue
4368                                 blocking_pkgs.add(inst_pkg)
4369
4370                 return blocking_pkgs
4371
4372 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4373
4374         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4375                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4376         p_type, p_root, p_key, p_status = parent_node
4377         msg = []
4378         if p_status == "nomerge":
4379                 category, pf = portage.catsplit(p_key)
4380                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4381                 msg.append("Portage is unable to process the dependencies of the ")
4382                 msg.append("'%s' package. " % p_key)
4383                 msg.append("In order to correct this problem, the package ")
4384                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4385                 msg.append("As a temporary workaround, the --nodeps option can ")
4386                 msg.append("be used to ignore all dependencies.  For reference, ")
4387                 msg.append("the problematic dependencies can be found in the ")
4388                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4389         else:
4390                 msg.append("This package can not be installed. ")
4391                 msg.append("Please notify the '%s' package maintainer " % p_key)
4392                 msg.append("about this problem.")
4393
4394         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4395         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4396
4397 class PackageVirtualDbapi(portage.dbapi):
4398         """
4399         A dbapi-like interface class that represents the state of the installed
4400         package database as new packages are installed, replacing any packages
4401         that previously existed in the same slot. The main difference between
4402         this class and fakedbapi is that this one uses Package instances
4403         internally (passed in via cpv_inject() and cpv_remove() calls).
4404         """
4405         def __init__(self, settings):
4406                 portage.dbapi.__init__(self)
4407                 self.settings = settings
4408                 self._match_cache = {}
4409                 self._cp_map = {}
4410                 self._cpv_map = {}
4411
4412         def clear(self):
4413                 """
4414                 Remove all packages.
4415                 """
4416                 if self._cpv_map:
4417                         self._clear_cache()
4418                         self._cp_map.clear()
4419                         self._cpv_map.clear()
4420
4421         def copy(self):
4422                 obj = PackageVirtualDbapi(self.settings)
4423                 obj._match_cache = self._match_cache.copy()
4424                 obj._cp_map = self._cp_map.copy()
4425                 for k, v in obj._cp_map.iteritems():
4426                         obj._cp_map[k] = v[:]
4427                 obj._cpv_map = self._cpv_map.copy()
4428                 return obj
4429
4430         def __iter__(self):
4431                 return self._cpv_map.itervalues()
4432
4433         def __contains__(self, item):
4434                 existing = self._cpv_map.get(item.cpv)
4435                 if existing is not None and \
4436                         existing == item:
4437                         return True
4438                 return False
4439
4440         def get(self, item, default=None):
4441                 cpv = getattr(item, "cpv", None)
4442                 if cpv is None:
4443                         if len(item) != 4:
4444                                 return default
4445                         type_name, root, cpv, operation = item
4446
4447                 existing = self._cpv_map.get(cpv)
4448                 if existing is not None and \
4449                         existing == item:
4450                         return existing
4451                 return default
4452
4453         def match_pkgs(self, atom):
4454                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4455
4456         def _clear_cache(self):
4457                 if self._categories is not None:
4458                         self._categories = None
4459                 if self._match_cache:
4460                         self._match_cache = {}
4461
4462         def match(self, origdep, use_cache=1):
4463                 result = self._match_cache.get(origdep)
4464                 if result is not None:
4465                         return result[:]
4466                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4467                 self._match_cache[origdep] = result
4468                 return result[:]
4469
4470         def cpv_exists(self, cpv):
4471                 return cpv in self._cpv_map
4472
4473         def cp_list(self, mycp, use_cache=1):
4474                 cachelist = self._match_cache.get(mycp)
4475                 # cp_list() doesn't expand old-style virtuals
4476                 if cachelist and cachelist[0].startswith(mycp):
4477                         return cachelist[:]
4478                 cpv_list = self._cp_map.get(mycp)
4479                 if cpv_list is None:
4480                         cpv_list = []
4481                 else:
4482                         cpv_list = [pkg.cpv for pkg in cpv_list]
4483                 self._cpv_sort_ascending(cpv_list)
4484                 if not (not cpv_list and mycp.startswith("virtual/")):
4485                         self._match_cache[mycp] = cpv_list
4486                 return cpv_list[:]
4487
4488         def cp_all(self):
4489                 return list(self._cp_map)
4490
4491         def cpv_all(self):
4492                 return list(self._cpv_map)
4493
4494         def cpv_inject(self, pkg):
4495                 cp_list = self._cp_map.get(pkg.cp)
4496                 if cp_list is None:
4497                         cp_list = []
4498                         self._cp_map[pkg.cp] = cp_list
4499                 e_pkg = self._cpv_map.get(pkg.cpv)
4500                 if e_pkg is not None:
4501                         if e_pkg == pkg:
4502                                 return
4503                         self.cpv_remove(e_pkg)
4504                 for e_pkg in cp_list:
4505                         if e_pkg.slot_atom == pkg.slot_atom:
4506                                 if e_pkg == pkg:
4507                                         return
4508                                 self.cpv_remove(e_pkg)
4509                                 break
4510                 cp_list.append(pkg)
4511                 self._cpv_map[pkg.cpv] = pkg
4512                 self._clear_cache()
4513
4514         def cpv_remove(self, pkg):
4515                 old_pkg = self._cpv_map.get(pkg.cpv)
4516                 if old_pkg != pkg:
4517                         raise KeyError(pkg)
4518                 self._cp_map[pkg.cp].remove(pkg)
4519                 del self._cpv_map[pkg.cpv]
4520                 self._clear_cache()
4521
4522         def aux_get(self, cpv, wants):
4523                 metadata = self._cpv_map[cpv].metadata
4524                 return [metadata.get(x, "") for x in wants]
4525
4526         def aux_update(self, cpv, values):
4527                 self._cpv_map[cpv].metadata.update(values)
4528                 self._clear_cache()
4529
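# Illustrative PackageVirtualDbapi usage (a sketch; depgraph below keeps one
# of these per root to model the post-install vdb state):
#
#       fakedb = PackageVirtualDbapi(settings)
#       fakedb.cpv_inject(pkg)          # replaces any package in the same slot
#       matches = fakedb.match_pkgs(atom)
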
4530 class depgraph(object):
4531
4532         pkg_tree_map = RootConfig.pkg_tree_map
4533
4534         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4535
4536         def __init__(self, settings, trees, myopts, myparams, spinner):
4537                 self.settings = settings
4538                 self.target_root = settings["ROOT"]
4539                 self.myopts = myopts
4540                 self.myparams = myparams
4541                 self.edebug = 0
4542                 if settings.get("PORTAGE_DEBUG", "") == "1":
4543                         self.edebug = 1
4544                 self.spinner = spinner
4545                 self._running_root = trees["/"]["root_config"]
4546                 self._opts_no_restart = Scheduler._opts_no_restart
4547                 self.pkgsettings = {}
4548                 # Maps slot atom to package for each Package added to the graph.
4549                 self._slot_pkg_map = {}
4550                 # Maps nodes to the reasons they were selected for reinstallation.
4551                 self._reinstall_nodes = {}
4552                 self.mydbapi = {}
4553                 self.trees = {}
4554                 self._trees_orig = trees
4555                 self.roots = {}
4556                 # Contains a filtered view of preferred packages that are selected
4557                 # from available repositories.
4558                 self._filtered_trees = {}
4559                 # Contains installed packages and new packages that have been added
4560                 # to the graph.
4561                 self._graph_trees = {}
4562                 # All Package instances
4563                 self._pkg_cache = {}
4564                 for myroot in trees:
4565                         self.trees[myroot] = {}
4566                         # Create a RootConfig instance that references
4567                         # the FakeVartree instead of the real one.
4568                         self.roots[myroot] = RootConfig(
4569                                 trees[myroot]["vartree"].settings,
4570                                 self.trees[myroot],
4571                                 trees[myroot]["root_config"].setconfig)
4572                         for tree in ("porttree", "bintree"):
4573                                 self.trees[myroot][tree] = trees[myroot][tree]
4574                         self.trees[myroot]["vartree"] = \
4575                                 FakeVartree(trees[myroot]["root_config"],
4576                                         pkg_cache=self._pkg_cache)
4577                         self.pkgsettings[myroot] = portage.config(
4578                                 clone=self.trees[myroot]["vartree"].settings)
4579                         self._slot_pkg_map[myroot] = {}
4580                         vardb = self.trees[myroot]["vartree"].dbapi
4581                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4582                                 "--buildpkgonly" not in self.myopts
4583                         # This fakedbapi instance will model the state that the vdb will
4584                         # have after new packages have been installed.
4585                         fakedb = PackageVirtualDbapi(vardb.settings)
4586                         if preload_installed_pkgs:
4587                                 for pkg in vardb:
4588                                         self.spinner.update()
4589                                         # This triggers metadata updates via FakeVartree.
4590                                         vardb.aux_get(pkg.cpv, [])
4591                                         fakedb.cpv_inject(pkg)
4592
4593                         # Now that the vardb state is cached in our FakeVartree,
4594                         # we won't be needing the real vartree cache for a while.
4595                         # To make some room on the heap, clear the vardbapi
4596                         # caches.
4597                         trees[myroot]["vartree"].dbapi._clear_cache()
4598                         gc.collect()
4599
4600                         self.mydbapi[myroot] = fakedb
4601                         def graph_tree():
4602                                 pass
4603                         graph_tree.dbapi = fakedb
4604                         self._graph_trees[myroot] = {}
4605                         self._filtered_trees[myroot] = {}
4606                         # Substitute the graph tree for the vartree in dep_check() since we
4607                         # want atom selections to be consistent with package selections
4608                         # that have already been made.
4609                         self._graph_trees[myroot]["porttree"]   = graph_tree
4610                         self._graph_trees[myroot]["vartree"]    = graph_tree
4611                         def filtered_tree():
4612                                 pass
4613                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4614                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4615
4616                         # Passing in graph_tree as the vartree here could lead to better
4617                         # atom selections in some cases by causing atoms for packages that
4618                         # have been added to the graph to be preferred over other choices.
4619                         # However, it can trigger atom selections that result in
4620                         # unresolvable direct circular dependencies. For example, this
4621                         # happens with gwydion-dylan which depends on either itself or
4622                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4623                         # gwydion-dylan-bin needs to be selected in order to avoid an
4624                         # unresolvable direct circular dependency.
4625                         #
4626                         # To solve the problem described above, pass in "graph_db" so that
4627                         # packages that have been added to the graph are distinguishable
4628                         # from other available packages and installed packages. Also, pass
4629                         # the parent package into self._select_atoms() calls so that
4630                         # unresolvable direct circular dependencies can be detected and
4631                         # avoided when possible.
4632                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4633                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4634
4635                         dbs = []
4636                         portdb = self.trees[myroot]["porttree"].dbapi
4637                         bindb  = self.trees[myroot]["bintree"].dbapi
4638                         vardb  = self.trees[myroot]["vartree"].dbapi
4639                         #               (db, pkg_type, built, installed, db_keys)
4640                         if "--usepkgonly" not in self.myopts:
4641                                 db_keys = list(portdb._aux_cache_keys)
4642                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4643                         if "--usepkg" in self.myopts:
4644                                 db_keys = list(bindb._aux_cache_keys)
4645                                 dbs.append((bindb,  "binary", True, False, db_keys))
4646                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4647                         dbs.append((vardb, "installed", True, True, db_keys))
4648                         self._filtered_trees[myroot]["dbs"] = dbs
4649                         if "--usepkg" in self.myopts:
4650                                 self.trees[myroot]["bintree"].populate(
4651                                         "--getbinpkg" in self.myopts,
4652                                         "--getbinpkgonly" in self.myopts)
4653                 del trees
4654
4655                 self.digraph=portage.digraph()
4656                 # contains all sets added to the graph
4657                 self._sets = {}
4658                 # contains atoms given as arguments
4659                 self._sets["args"] = InternalPackageSet()
4660                 # contains all atoms from all sets added to the graph, including
4661                 # atoms given as arguments
4662                 self._set_atoms = InternalPackageSet()
4663                 self._atom_arg_map = {}
4664                 # contains all nodes pulled in by self._set_atoms
4665                 self._set_nodes = set()
4666                 # Contains only Blocker -> Uninstall edges
4667                 self._blocker_uninstalls = digraph()
4668                 # Contains only Package -> Blocker edges
4669                 self._blocker_parents = digraph()
4670                 # Contains only irrelevant Package -> Blocker edges
4671                 self._irrelevant_blockers = digraph()
4672                 # Contains only unsolvable Package -> Blocker edges
4673                 self._unsolvable_blockers = digraph()
4674                 # Contains all Blocker -> Blocked Package edges
4675                 self._blocked_pkgs = digraph()
4676                 # Contains world packages that have been protected from
4677                 # uninstallation but may not have been added to the graph
4678                 # if the graph is not complete yet.
4679                 self._blocked_world_pkgs = {}
4680                 self._slot_collision_info = {}
4681                 # Slot collision nodes are not allowed to block other packages since
4682                 # blocker validation is only able to account for one package per slot.
4683                 self._slot_collision_nodes = set()
4684                 self._parent_atoms = {}
4685                 self._slot_conflict_parent_atoms = set()
4686                 self._serialized_tasks_cache = None
4687                 self._scheduler_graph = None
4688                 self._displayed_list = None
4689                 self._pprovided_args = []
4690                 self._missing_args = []
4691                 self._masked_installed = set()
4692                 self._unsatisfied_deps_for_display = []
4693                 self._unsatisfied_blockers_for_display = None
4694                 self._circular_deps_for_display = None
4695                 self._dep_stack = []
4696                 self._unsatisfied_deps = []
4697                 self._initially_unsatisfied_deps = []
4698                 self._ignored_deps = []
4699                 self._required_set_names = set(["system", "world"])
4700                 self._select_atoms = self._select_atoms_highest_available
4701                 self._select_package = self._select_pkg_highest_available
4702                 self._highest_pkg_cache = {}
4703
4704         def _show_slot_collision_notice(self):
4705                 """Show an informational message advising the user to mask one of the
4706                 packages. In some cases it may be possible to resolve this
4707                 automatically, but support for backtracking (removal of nodes that have
4708                 already been selected) will be required in order to handle all possible
4709                 cases.
4710                 """
4711
4712                 if not self._slot_collision_info:
4713                         return
4714
4715                 self._show_merge_list()
4716
4717                 msg = []
4718                 msg.append("\n!!! Multiple package instances within a single " + \
4719                         "package slot have been pulled\n")
4720                 msg.append("!!! into the dependency graph, resulting" + \
4721                         " in a slot conflict:\n\n")
4722                 indent = "  "
4723                 # Max number of parents shown, to avoid flooding the display.
4724                 max_parents = 3
4725                 explanation_columns = 70
4726                 explanations = 0
4727                 for (slot_atom, root), slot_nodes \
4728                         in self._slot_collision_info.iteritems():
4729                         msg.append(str(slot_atom))
4730                         msg.append("\n\n")
4731
4732                         for node in slot_nodes:
4733                                 msg.append(indent)
4734                                 msg.append(str(node))
4735                                 parent_atoms = self._parent_atoms.get(node)
4736                                 if parent_atoms:
4737                                         pruned_list = set()
4738                                         # Prefer conflict atoms over others.
4739                                         for parent_atom in parent_atoms:
4740                                                 if len(pruned_list) >= max_parents:
4741                                                         break
4742                                                 if parent_atom in self._slot_conflict_parent_atoms:
4743                                                         pruned_list.add(parent_atom)
4744
4745                                         # If this package was pulled in by conflict atoms then
4746                                         # show those alone since those are the most interesting.
4747                                         if not pruned_list:
4748                                                 # When generating the pruned list, prefer instances
4749                                                 # of DependencyArg over instances of Package.
4750                                                 for parent_atom in parent_atoms:
4751                                                         if len(pruned_list) >= max_parents:
4752                                                                 break
4753                                                         parent, atom = parent_atom
4754                                                         if isinstance(parent, DependencyArg):
4755                                                                 pruned_list.add(parent_atom)
4756                                                 # Prefer Package instances that themselves have been
4757                                                 # pulled into collision slots.
4758                                                 for parent_atom in parent_atoms:
4759                                                         if len(pruned_list) >= max_parents:
4760                                                                 break
4761                                                         parent, atom = parent_atom
4762                                                         if isinstance(parent, Package) and \
4763                                                                 (parent.slot_atom, parent.root) \
4764                                                                 in self._slot_collision_info:
4765                                                                 pruned_list.add(parent_atom)
4766                                                 for parent_atom in parent_atoms:
4767                                                         if len(pruned_list) >= max_parents:
4768                                                                 break
4769                                                         pruned_list.add(parent_atom)
4770                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4771                                         parent_atoms = pruned_list
4772                                         msg.append(" pulled in by\n")
4773                                         for parent_atom in parent_atoms:
4774                                                 parent, atom = parent_atom
4775                                                 msg.append(2*indent)
4776                                                 if isinstance(parent,
4777                                                         (PackageArg, AtomArg)):
4778                                                         # For PackageArg and AtomArg types, it's
4779                                                         # redundant to display the atom attribute.
4780                                                         msg.append(str(parent))
4781                                                 else:
4782                                                         # Display the specific atom from SetArg or
4783                                                         # Package types.
4784                                                         msg.append("%s required by %s" % (atom, parent))
4785                                                 msg.append("\n")
4786                                         if omitted_parents:
4787                                                 msg.append(2*indent)
4788                                                 msg.append("(and %d more)\n" % omitted_parents)
4789                                 else:
4790                                         msg.append(" (no parents)\n")
4791                                 msg.append("\n")
4792                         explanation = self._slot_conflict_explanation(slot_nodes)
4793                         if explanation:
4794                                 explanations += 1
4795                                 msg.append(indent + "Explanation:\n\n")
4796                                 for line in textwrap.wrap(explanation, explanation_columns):
4797                                         msg.append(2*indent + line + "\n")
4798                                 msg.append("\n")
4799                 msg.append("\n")
4800                 sys.stderr.write("".join(msg))
4801                 sys.stderr.flush()
4802
4803                 explanations_for_all = explanations == len(self._slot_collision_info)
4804
4805                 if explanations_for_all or "--quiet" in self.myopts:
4806                         return
4807
4808                 msg = []
4809                 msg.append("It may be possible to solve this problem ")
4810                 msg.append("by using package.mask to prevent one of ")
4811                 msg.append("those packages from being selected. ")
4812                 msg.append("However, it is also possible that conflicting ")
4813                 msg.append("dependencies exist such that they are impossible to ")
4814                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4815                 msg.append("the dependencies of two different packages, then those ")
4816                 msg.append("packages cannot be installed simultaneously.")
4817
4818                 from formatter import AbstractFormatter, DumbWriter
4819                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4820                 for x in msg:
4821                         f.add_flowing_data(x)
4822                 f.end_paragraph(1)
4823
4824                 msg = []
4825                 msg.append("For more information, see MASKED PACKAGES ")
4826                 msg.append("section in the emerge man page or refer ")
4827                 msg.append("to the Gentoo Handbook.")
4828                 for x in msg:
4829                         f.add_flowing_data(x)
4830                 f.end_paragraph(1)
4831                 f.writer.flush()
4832
4833         def _slot_conflict_explanation(self, slot_nodes):
4834                 """
4835                 When a slot conflict occurs due to USE deps, there are a few
4836                 different cases to consider:
4837
4838                 1) New USE are correctly set but --newuse wasn't requested so an
4839                    installed package with incorrect USE happened to get pulled
4840                    into the graph before the new one.
4841
4842                 2) New USE are incorrectly set but an installed package has correct
4843                    USE so it got pulled into the graph, and a new instance also got
4844                    pulled in due to --newuse or an upgrade.
4845
4846                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4847                    and multiple package instances got pulled into the same slot to
4848                    satisfy the conflicting deps.
4849
4850                 Currently, explanations and suggested courses of action are generated
4851                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4852                 """
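                     # Illustrative sketch (not from the original code): the cases above can
                     # be pictured with two hypothetical instances pulled into the same slot,
                     # e.g. an installed dev-libs/foo-1.0 with USE="-ssl" and an ebuild
                     # instance of dev-libs/foo-1.0 whose parent's USE dep requires "ssl".
                     # When the two cpv values match and only the new (not installed)
                     # instance satisfies the conflict atoms, case 1 applies and suggesting
                     # --newuse (or a reinstall) is enough; when the installed instance is
                     # the one that satisfies them, case 2 applies and the suggestion is to
                     # adjust USE instead.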
4853
4854                 if len(slot_nodes) != 2:
4855                         # Suggestions are only implemented for
4856                         # conflicts between two packages.
4857                         return None
4858
4859                 all_conflict_atoms = self._slot_conflict_parent_atoms
4860                 matched_node = None
4861                 matched_atoms = None
4862                 unmatched_node = None
4863                 for node in slot_nodes:
4864                         parent_atoms = self._parent_atoms.get(node)
4865                         if not parent_atoms:
4866                                 # Normally, there are always parent atoms. If there are
4867                                 # none then something unexpected is happening and there's
4868                                 # currently no suggestion for this case.
4869                                 return None
4870                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4871                         for parent_atom in conflict_atoms:
4872                                 parent, atom = parent_atom
4873                                 if not atom.use:
4874                                         # Suggestions are currently only implemented for cases
4875                                         # in which all conflict atoms have USE deps.
4876                                         return None
4877                         if conflict_atoms:
4878                                 if matched_node is not None:
4879                                         # If conflict atoms match multiple nodes
4880                                         # then there's no suggestion.
4881                                         return None
4882                                 matched_node = node
4883                                 matched_atoms = conflict_atoms
4884                         else:
4885                                 if unmatched_node is not None:
4886                                         # Neither node is matched by conflict atoms, and
4887                                         # there is no suggestion for this case.
4888                                         return None
4889                                 unmatched_node = node
4890
4891                 if matched_node is None or unmatched_node is None:
4892                         # This shouldn't happen.
4893                         return None
4894
4895                 if unmatched_node.installed and not matched_node.installed and \
4896                         unmatched_node.cpv == matched_node.cpv:
4897                         # If the conflicting packages are the same version then
4898                         # --newuse should be all that's needed. If they are different
4899                         # versions then there's some other problem.
4900                         return "New USE are correctly set, but --newuse wasn't" + \
4901                                 " requested, so an installed package with incorrect USE " + \
4902                                 "happened to get pulled into the dependency graph. " + \
4903                                 "In order to solve " + \
4904                                 "this, either specify the --newuse option or explicitly " + \
4905                                 "reinstall '%s'." % matched_node.slot_atom
4906
4907                 if matched_node.installed and not unmatched_node.installed:
4908                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4909                         explanation = ("New USE for '%s' are incorrectly set. " + \
4910                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4911                                 (matched_node.slot_atom, atoms[0])
4912                         if len(atoms) > 1:
4913                                 for atom in atoms[1:-1]:
4914                                         explanation += ", '%s'" % (atom,)
4915                                 if len(atoms) > 2:
4916                                         explanation += ","
4917                                 explanation += " and '%s'" % (atoms[-1],)
4918                         explanation += "."
4919                         return explanation
4920
4921                 return None
4922
4923         def _process_slot_conflicts(self):
4924                 """
4925                 Process slot conflict data to identify specific atoms which
4926                 lead to conflict. These atoms only match a subset of the
4927                 packages that have been pulled into a given slot.
4928                 """
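                     # Illustrative sketch (not from the original code): if a slot holds
                     # both foo-1.0 and foo-2.0 (hypothetical), a parent atom such as
                     # >=cat/foo-2.0 matches only one of the two instances, so it gets
                     # recorded in self._slot_conflict_parent_atoms below, while an atom
                     # like cat/foo that matches every instance in the slot does not.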
4929                 for (slot_atom, root), slot_nodes \
4930                         in self._slot_collision_info.iteritems():
4931
4932                         all_parent_atoms = set()
4933                         for pkg in slot_nodes:
4934                                 parent_atoms = self._parent_atoms.get(pkg)
4935                                 if not parent_atoms:
4936                                         continue
4937                                 all_parent_atoms.update(parent_atoms)
4938
4939                         for pkg in slot_nodes:
4940                                 parent_atoms = self._parent_atoms.get(pkg)
4941                                 if parent_atoms is None:
4942                                         parent_atoms = set()
4943                                         self._parent_atoms[pkg] = parent_atoms
4944                                 for parent_atom in all_parent_atoms:
4945                                         if parent_atom in parent_atoms:
4946                                                 continue
4947                                         # Use package set for matching since it will match via
4948                                         # PROVIDE when necessary, while match_from_list does not.
4949                                         parent, atom = parent_atom
4950                                         atom_set = InternalPackageSet(
4951                                                 initial_atoms=(atom,))
4952                                         if atom_set.findAtomForPackage(pkg):
4953                                                 parent_atoms.add(parent_atom)
4954                                         else:
4955                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4956
4957         def _reinstall_for_flags(self, forced_flags,
4958                 orig_use, orig_iuse, cur_use, cur_iuse):
4959                 """Return a set of flags that trigger reinstallation, or None if there
4960                 are no such flags."""
4961                 if "--newuse" in self.myopts:
4962                         flags = set(orig_iuse.symmetric_difference(
4963                                 cur_iuse).difference(forced_flags))
4964                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4965                                 cur_iuse.intersection(cur_use)))
4966                         if flags:
4967                                 return flags
4968                 elif "changed-use" == self.myopts.get("--reinstall"):
4969                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4970                                 cur_iuse.intersection(cur_use))
4971                         if flags:
4972                                 return flags
4973                 return None
4974
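             # Illustrative sketch (not from the original code) of the --newuse logic in
             # _reinstall_for_flags above, using hypothetical flag sets:
             #
             #   orig_iuse = set(["gtk", "ssl"]); orig_use = set(["ssl"])
             #   cur_iuse  = set(["gtk", "ssl"]); cur_use  = set(["gtk", "ssl"])
             #   forced_flags = set()
             #
             #   orig_iuse ^ cur_iuse                          -> set()
             #   (orig_iuse & orig_use) ^ (cur_iuse & cur_use) -> set(["gtk"])
             #
             # so "gtk" is the flag whose changed state triggers a reinstall.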
4975         def _create_graph(self, allow_unsatisfied=False):
4976                 dep_stack = self._dep_stack
4977                 while dep_stack:
4978                         self.spinner.update()
4979                         dep = dep_stack.pop()
4980                         if isinstance(dep, Package):
4981                                 if not self._add_pkg_deps(dep,
4982                                         allow_unsatisfied=allow_unsatisfied):
4983                                         return 0
4984                                 continue
4985                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4986                                 return 0
4987                 return 1
4988
4989         def _add_dep(self, dep, allow_unsatisfied=False):
4990                 debug = "--debug" in self.myopts
4991                 buildpkgonly = "--buildpkgonly" in self.myopts
4992                 nodeps = "--nodeps" in self.myopts
4993                 empty = "empty" in self.myparams
4994                 deep = "deep" in self.myparams
4995                 update = "--update" in self.myopts and dep.depth <= 1
4996                 if dep.blocker:
4997                         if not buildpkgonly and \
4998                                 not nodeps and \
4999                                 dep.parent not in self._slot_collision_nodes:
5000                                 if dep.parent.onlydeps:
5001                                         # It's safe to ignore blockers if the
5002                                         # parent is an --onlydeps node.
5003                                         return 1
5004                                 # The blocker applies to the root where
5005                                 # the parent is or will be installed.
5006                                 blocker = Blocker(atom=dep.atom,
5007                                         eapi=dep.parent.metadata["EAPI"],
5008                                         root=dep.parent.root)
5009                                 self._blocker_parents.add(blocker, dep.parent)
5010                         return 1
5011                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5012                         onlydeps=dep.onlydeps)
5013                 if not dep_pkg:
5014                         if dep.priority.optional:
5015                                 # This could be an unnecessary build-time dep
5016                                 # pulled in by --with-bdeps=y.
5017                                 return 1
5018                         if allow_unsatisfied:
5019                                 self._unsatisfied_deps.append(dep)
5020                                 return 1
5021                         self._unsatisfied_deps_for_display.append(
5022                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5023                         return 0
5024                 # In some cases, dep_check will return deps that shouldn't
5025                 # be processed any further, so they are identified and
5026                 # discarded here. Try to discard as few as possible since
5027                 # discarded dependencies reduce the amount of information
5028                 # available for optimization of merge order.
5029                 if dep.priority.satisfied and \
5030                         not dep_pkg.installed and \
5031                         not (existing_node or empty or deep or update):
5032                         myarg = None
5033                         if dep.root == self.target_root:
5034                                 try:
5035                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5036                                 except StopIteration:
5037                                         pass
5038                                 except portage.exception.InvalidDependString:
5039                                         if not dep_pkg.installed:
5040                                                 # This shouldn't happen since the package
5041                                                 # should have been masked.
5042                                                 raise
5043                         if not myarg:
5044                                 self._ignored_deps.append(dep)
5045                                 return 1
5046
5047                 if not self._add_pkg(dep_pkg, dep):
5048                         return 0
5049                 return 1
5050
5051         def _add_pkg(self, pkg, dep):
5052                 myparent = None
5053                 priority = None
5054                 depth = 0
5055                 if dep is None:
5056                         dep = Dependency()
5057                 else:
5058                         myparent = dep.parent
5059                         priority = dep.priority
5060                         depth = dep.depth
5061                 if priority is None:
5062                         priority = DepPriority()
5063                 """
5064                 Fills the digraph with nodes comprised of packages to merge.
5065                 mybigkey is the package spec of the package to merge.
5066                 myparent is the package depending on mybigkey ( or None )
5067                 addme = Should we add this package to the digraph or are we just looking at its deps?
5068                         Think --onlydeps, we need to ignore packages in that case.
5069                 #stuff to add:
5070                 #SLOT-aware emerge
5071                 #IUSE-aware emerge -> USE DEP aware depgraph
5072                 #"no downgrade" emerge
5073                 """
5074                 # Ensure that the dependencies of the same package
5075                 # are never processed more than once.
5076                 previously_added = pkg in self.digraph
5077
5078                 # select the correct /var database that we'll be checking against
5079                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5080                 pkgsettings = self.pkgsettings[pkg.root]
5081
5082                 arg_atoms = None
5083                 if True:
5084                         try:
5085                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5086                         except portage.exception.InvalidDependString, e:
5087                                 if not pkg.installed:
5088                                         show_invalid_depstring_notice(
5089                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5090                                         return 0
5091                                 del e
5092
5093                 if not pkg.onlydeps:
5094                         if not pkg.installed and \
5095                                 "empty" not in self.myparams and \
5096                                 vardbapi.match(pkg.slot_atom):
5097                                 # Increase the priority of dependencies on packages that
5098                                 # are being rebuilt. This optimizes merge order so that
5099                                 # dependencies are rebuilt/updated as soon as possible,
5100                                 # which is needed especially when emerge is called by
5101                                 # revdep-rebuild since dependencies may be affected by ABI
5102                                 # breakage that has rendered them useless. Don't adjust
5103                                 # priority here when in "empty" mode since all packages
5104                                 # are being merged in that case.
5105                                 priority.rebuild = True
5106
5107                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5108                         slot_collision = False
5109                         if existing_node:
5110                                 existing_node_matches = pkg.cpv == existing_node.cpv
5111                                 if existing_node_matches and \
5112                                         pkg != existing_node and \
5113                                         dep.atom is not None:
5114                                         # Use package set for matching since it will match via
5115                                         # PROVIDE when necessary, while match_from_list does not.
5116                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5117                                         if not atom_set.findAtomForPackage(existing_node):
5118                                                 existing_node_matches = False
5119                                 if existing_node_matches:
5120                                         # The existing node can be reused.
5121                                         if arg_atoms:
5122                                                 for parent_atom in arg_atoms:
5123                                                         parent, atom = parent_atom
5124                                                         self.digraph.add(existing_node, parent,
5125                                                                 priority=priority)
5126                                                         self._add_parent_atom(existing_node, parent_atom)
5127                                         # If a direct circular dependency is not an unsatisfied
5128                                         # buildtime dependency then drop it here since otherwise
5129                                         # it can skew the merge order calculation in an unwanted
5130                                         # way.
5131                                         if existing_node != myparent or \
5132                                                 (priority.buildtime and not priority.satisfied):
5133                                                 self.digraph.addnode(existing_node, myparent,
5134                                                         priority=priority)
5135                                                 if dep.atom is not None and dep.parent is not None:
5136                                                         self._add_parent_atom(existing_node,
5137                                                                 (dep.parent, dep.atom))
5138                                         return 1
5139                                 else:
5140
5141                                         # A slot collision has occurred.  Sometimes this coincides
5142                                         # with unresolvable blockers, so the slot collision will be
5143                                         # shown later if there are no unresolvable blockers.
5144                                         self._add_slot_conflict(pkg)
5145                                         slot_collision = True
5146
5147                         if slot_collision:
5148                                 # Now add this node to the graph so that self.display()
5149                                 # can show use flags and --tree output.  This node is
5150                                 # only being partially added to the graph.  It must not be
5151                                 # allowed to interfere with the other nodes that have been
5152                                 # added.  Do not overwrite data for existing nodes in
5153                                 # self.mydbapi since that data will be used for blocker
5154                                 # validation.
5155                                 # Even though the graph is now invalid, continue to process
5156                                 # dependencies so that things like --fetchonly can still
5157                                 # function despite collisions.
5158                                 pass
5159                         elif not previously_added:
5160                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5161                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5162                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5163
5164                         if not pkg.installed:
5165                                 # Allow this package to satisfy old-style virtuals in case it
5166                                 # doesn't already. Any pre-existing providers will be preferred
5167                                 # over this one.
5168                                 try:
5169                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5170                                         # For consistency, also update the global virtuals.
5171                                         settings = self.roots[pkg.root].settings
5172                                         settings.unlock()
5173                                         settings.setinst(pkg.cpv, pkg.metadata)
5174                                         settings.lock()
5175                                 except portage.exception.InvalidDependString, e:
5176                                         show_invalid_depstring_notice(
5177                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5178                                         del e
5179                                         return 0
5180
5181                 if arg_atoms:
5182                         self._set_nodes.add(pkg)
5183
5184                 # Do this even for --onlydeps, when the package itself will not be
5185                 # merged, so that the parent/child relationship is always known in
5186                 # case self._show_slot_collision_notice() needs to be called later.
5187                 self.digraph.add(pkg, myparent, priority=priority)
5188                 if dep.atom is not None and dep.parent is not None:
5189                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5190
5191                 if arg_atoms:
5192                         for parent_atom in arg_atoms:
5193                                 parent, atom = parent_atom
5194                                 self.digraph.add(pkg, parent, priority=priority)
5195                                 self._add_parent_atom(pkg, parent_atom)
5196
5197                 """ This section determines whether we go deeper into dependencies or not.
5198                     We want to go deeper on a few occasions:
5199                     - Installing package A: we need to make sure package A's deps are met.
5200                     - emerge --deep <pkgspec>: we need to recursively check the dependencies of pkgspec.
5201                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5202                 """
5203                 dep_stack = self._dep_stack
5204                 if "recurse" not in self.myparams:
5205                         return 1
5206                 elif pkg.installed and \
5207                         "deep" not in self.myparams:
5208                         dep_stack = self._ignored_deps
5209
5210                 self.spinner.update()
5211
5212                 if arg_atoms:
5213                         depth = 0
5214                 pkg.depth = depth
5215                 if not previously_added:
5216                         dep_stack.append(pkg)
5217                 return 1
5218
5219         def _add_parent_atom(self, pkg, parent_atom):
5220                 parent_atoms = self._parent_atoms.get(pkg)
5221                 if parent_atoms is None:
5222                         parent_atoms = set()
5223                         self._parent_atoms[pkg] = parent_atoms
5224                 parent_atoms.add(parent_atom)
5225
5226         def _add_slot_conflict(self, pkg):
5227                 self._slot_collision_nodes.add(pkg)
5228                 slot_key = (pkg.slot_atom, pkg.root)
5229                 slot_nodes = self._slot_collision_info.get(slot_key)
5230                 if slot_nodes is None:
5231                         slot_nodes = set()
5232                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5233                         self._slot_collision_info[slot_key] = slot_nodes
5234                 slot_nodes.add(pkg)
5235
5236         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5237
5238                 mytype = pkg.type_name
5239                 myroot = pkg.root
5240                 mykey = pkg.cpv
5241                 metadata = pkg.metadata
5242                 myuse = pkg.use.enabled
5243                 jbigkey = pkg
5244                 depth = pkg.depth + 1
5245                 removal_action = "remove" in self.myparams
5246
5247                 edepend={}
5248                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5249                 for k in depkeys:
5250                         edepend[k] = metadata[k]
5251
5252                 if not pkg.built and \
5253                         "--buildpkgonly" in self.myopts and \
5254                         "deep" not in self.myparams and \
5255                         "empty" not in self.myparams:
5256                         edepend["RDEPEND"] = ""
5257                         edepend["PDEPEND"] = ""
5258                 bdeps_optional = False
5259
5260                 if pkg.built and not removal_action:
5261                         if self.myopts.get("--with-bdeps", "n") == "y":
5262                                 # Pull in build time deps as requested, but mark them as
5263                                 # "optional" since they are not strictly required. This allows
5264                                 # more freedom in the merge order calculation for solving
5265                                 # circular dependencies. Don't convert to PDEPEND since that
5266                                 # could make --with-bdeps=y less effective if it is used to
5267                                 # adjust merge order to prevent built_with_use() calls from
5268                                 # failing.
5269                                 bdeps_optional = True
5270                         else:
5271                                 # Built packages do not have build time dependencies.
5272                                 edepend["DEPEND"] = ""
5273
5274                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5275                         edepend["DEPEND"] = ""
5276
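                     # Illustrative sketch (not from the original code): for a built package
                     # (binary or installed) the DEPEND handling above works roughly like
                     # this, using a hypothetical depstring:
                     #
                     #   edepend["DEPEND"] = ">=dev-util/tool-1.0"   (hypothetical)
                     #   --with-bdeps=y  -> kept, but queued with an "optional" priority
                     #   default (n)     -> edepend["DEPEND"] = ""   (dropped entirely)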
5277                 bdeps_root = "/"
5278                 if self.target_root != "/":
5279                         if "--root-deps" in self.myopts:
5280                                         bdeps_root = myroot
5281                         if "--rdeps-only" in self.myopts:
5282                                         bdeps_root = "/"
5283                                         edepend["DEPEND"] = ""
5284
5285                 deps = (
5286                         (bdeps_root, edepend["DEPEND"],
5287                                 self._priority(buildtime=(not bdeps_optional),
5288                                 optional=bdeps_optional)),
5289                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5290                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5291                 )
5292
5293                 debug = "--debug" in self.myopts
5294                 strict = mytype != "installed"
5295                 try:
5296                         for dep_root, dep_string, dep_priority in deps:
5297                                 if not dep_string:
5298                                         continue
5299                                 if debug:
5300                                         print
5301                                         print "Parent:   ", jbigkey
5302                                         print "Depstring:", dep_string
5303                                         print "Priority:", dep_priority
5304                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5305                                 try:
5306                                         selected_atoms = self._select_atoms(dep_root,
5307                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5308                                                 priority=dep_priority)
5309                                 except portage.exception.InvalidDependString, e:
5310                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5311                                         return 0
5312                                 if debug:
5313                                         print "Candidates:", selected_atoms
5314
5315                                 for atom in selected_atoms:
5316                                         try:
5317
5318                                                 atom = portage.dep.Atom(atom)
5319
5320                                                 mypriority = dep_priority.copy()
5321                                                 if not atom.blocker and vardb.match(atom):
5322                                                         mypriority.satisfied = True
5323
5324                                                 if not self._add_dep(Dependency(atom=atom,
5325                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5326                                                         priority=mypriority, root=dep_root),
5327                                                         allow_unsatisfied=allow_unsatisfied):
5328                                                         return 0
5329
5330                                         except portage.exception.InvalidAtom, e:
5331                                                 show_invalid_depstring_notice(
5332                                                         pkg, dep_string, str(e))
5333                                                 del e
5334                                                 if not pkg.installed:
5335                                                         return 0
5336
5337                                 if debug:
5338                                         print "Exiting...", jbigkey
5339                 except portage.exception.AmbiguousPackageName, e:
5340                         pkgs = e.args[0]
5341                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5342                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5343                         for cpv in pkgs:
5344                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5345                         portage.writemsg("\n", noiselevel=-1)
5346                         if mytype == "binary":
5347                                 portage.writemsg(
5348                                         "!!! This binary package cannot be installed: '%s'\n" % \
5349                                         mykey, noiselevel=-1)
5350                         elif mytype == "ebuild":
5351                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5352                                 myebuild, mylocation = portdb.findname2(mykey)
5353                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5354                                         "'%s'\n" % myebuild, noiselevel=-1)
5355                         portage.writemsg("!!! Please notify the package maintainer " + \
5356                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5357                         return 0
5358                 return 1
5359
5360         def _priority(self, **kwargs):
5361                 if "remove" in self.myparams:
5362                         priority_constructor = UnmergeDepPriority
5363                 else:
5364                         priority_constructor = DepPriority
5365                 return priority_constructor(**kwargs)
5366
5367         def _dep_expand(self, root_config, atom_without_category):
5368                 """
5369                 @param root_config: a root config instance
5370                 @type root_config: RootConfig
5371                 @param atom_without_category: an atom without a category component
5372                 @type atom_without_category: String
5373                 @rtype: list
5374                 @returns: a list of atoms containing categories (possibly empty)
5375                 """
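                     # Illustrative sketch (not from the original code): for a hypothetical
                     # argument "foo", if packages named foo exist under both dev-libs and
                     # app-misc in the configured trees, this returns something like
                     # ["dev-libs/foo", "app-misc/foo"]; callers such as select_files()
                     # then disambiguate or report the ambiguity to the user.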
5376                 null_cp = portage.dep_getkey(insert_category_into_atom(
5377                         atom_without_category, "null"))
5378                 cat, atom_pn = portage.catsplit(null_cp)
5379
5380                 dbs = self._filtered_trees[root_config.root]["dbs"]
5381                 categories = set()
5382                 for db, pkg_type, built, installed, db_keys in dbs:
5383                         for cat in db.categories:
5384                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5385                                         categories.add(cat)
5386
5387                 deps = []
5388                 for cat in categories:
5389                         deps.append(insert_category_into_atom(
5390                                 atom_without_category, cat))
5391                 return deps
5392
5393         def _have_new_virt(self, root, atom_cp):
5394                 ret = False
5395                 for db, pkg_type, built, installed, db_keys in \
5396                         self._filtered_trees[root]["dbs"]:
5397                         if db.cp_list(atom_cp):
5398                                 ret = True
5399                                 break
5400                 return ret
5401
5402         def _iter_atoms_for_pkg(self, pkg):
5403                 # TODO: add multiple $ROOT support
5404                 if pkg.root != self.target_root:
5405                         return
5406                 atom_arg_map = self._atom_arg_map
5407                 root_config = self.roots[pkg.root]
5408                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5409                         atom_cp = portage.dep_getkey(atom)
5410                         if atom_cp != pkg.cp and \
5411                                 self._have_new_virt(pkg.root, atom_cp):
5412                                 continue
5413                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5414                         visible_pkgs.reverse() # descending order
5415                         higher_slot = None
5416                         for visible_pkg in visible_pkgs:
5417                                 if visible_pkg.cp != atom_cp:
5418                                         continue
5419                                 if pkg >= visible_pkg:
5420                                         # This is descending order, and we're not
5421                                         # interested in any versions <= pkg given.
5422                                         break
5423                                 if pkg.slot_atom != visible_pkg.slot_atom:
5424                                         higher_slot = visible_pkg
5425                                         break
5426                         if higher_slot is not None:
5427                                 continue
5428                         for arg in atom_arg_map[(atom, pkg.root)]:
5429                                 if isinstance(arg, PackageArg) and \
5430                                         arg.package != pkg:
5431                                         continue
5432                                 yield arg, atom
5433
5434         def select_files(self, myfiles):
5435                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5436                 appropriate depgraph and return a favorite list."""
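                     # Illustrative sketch (not from the original code): a caller might pass
                     # a mix of argument types, e.g. (hypothetical values)
                     #
                     #   myfiles = ["foo-1.0.tbz2", "./cat/foo/foo-1.0.ebuild",
                     #       "world", "dev-libs/foo"]
                     #
                     # and each element is turned into a PackageArg, SetArg or AtomArg below
                     # before dependency resolution starts.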
5437                 debug = "--debug" in self.myopts
5438                 root_config = self.roots[self.target_root]
5439                 sets = root_config.sets
5440                 getSetAtoms = root_config.setconfig.getSetAtoms
5441                 myfavorites=[]
5442                 myroot = self.target_root
5443                 dbs = self._filtered_trees[myroot]["dbs"]
5444                 vardb = self.trees[myroot]["vartree"].dbapi
5445                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5446                 portdb = self.trees[myroot]["porttree"].dbapi
5447                 bindb = self.trees[myroot]["bintree"].dbapi
5448                 pkgsettings = self.pkgsettings[myroot]
5449                 args = []
5450                 onlydeps = "--onlydeps" in self.myopts
5451                 lookup_owners = []
5452                 for x in myfiles:
5453                         ext = os.path.splitext(x)[1]
5454                         if ext==".tbz2":
5455                                 if not os.path.exists(x):
5456                                         if os.path.exists(
5457                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5458                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5459                                         elif os.path.exists(
5460                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5461                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5462                                         else:
5463                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5464                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5465                                                 return 0, myfavorites
5466                                 mytbz2=portage.xpak.tbz2(x)
5467                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5468                                 if os.path.realpath(x) != \
5469                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5470                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5471                                         return 0, myfavorites
5472                                 db_keys = list(bindb._aux_cache_keys)
5473                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5474                                 pkg = Package(type_name="binary", root_config=root_config,
5475                                         cpv=mykey, built=True, metadata=metadata,
5476                                         onlydeps=onlydeps)
5477                                 self._pkg_cache[pkg] = pkg
5478                                 args.append(PackageArg(arg=x, package=pkg,
5479                                         root_config=root_config))
5480                         elif ext==".ebuild":
5481                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5482                                 pkgdir = os.path.dirname(ebuild_path)
5483                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5484                                 cp = pkgdir[len(tree_root)+1:]
5485                                 e = portage.exception.PackageNotFound(
5486                                         ("%s is not in a valid portage tree " + \
5487                                         "hierarchy or does not exist") % x)
5488                                 if not portage.isvalidatom(cp):
5489                                         raise e
5490                                 cat = portage.catsplit(cp)[0]
5491                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5492                                 if not portage.isvalidatom("="+mykey):
5493                                         raise e
5494                                 ebuild_path = portdb.findname(mykey)
5495                                 if ebuild_path:
5496                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5497                                                 cp, os.path.basename(ebuild_path)):
5498                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5499                                                 return 0, myfavorites
5500                                         if mykey not in portdb.xmatch(
5501                                                 "match-visible", portage.dep_getkey(mykey)):
5502                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5503                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5504                                                 print colorize("BAD", "*** page for details.")
5505                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5506                                                         "Continuing...")
5507                                 else:
5508                                         raise portage.exception.PackageNotFound(
5509                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5510                                 db_keys = list(portdb._aux_cache_keys)
5511                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5512                                 pkg = Package(type_name="ebuild", root_config=root_config,
5513                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5514                                 pkgsettings.setcpv(pkg)
5515                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5516                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5517                                 self._pkg_cache[pkg] = pkg
5518                                 args.append(PackageArg(arg=x, package=pkg,
5519                                         root_config=root_config))
5520                         elif x.startswith(os.path.sep):
5521                                 if not x.startswith(myroot):
5522                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5523                                                 " $ROOT.\n") % x, noiselevel=-1)
5524                                         return 0, []
5525                                 # Queue these up since it's most efficient to handle
5526                                 # multiple files in a single iter_owners() call.
5527                                 lookup_owners.append(x)
5528                         else:
5529                                 if x in ("system", "world"):
5530                                         x = SETPREFIX + x
5531                                 if x.startswith(SETPREFIX):
5532                                         s = x[len(SETPREFIX):]
5533                                         if s not in sets:
5534                                                 raise portage.exception.PackageSetNotFound(s)
5535                                         if s in self._sets:
5536                                                 continue
5537                                         # Recursively expand sets so that containment tests in
5538                                         # self._get_parent_sets() properly match atoms in nested
5539                                         # sets (like if world contains system).
5540                                         expanded_set = InternalPackageSet(
5541                                                 initial_atoms=getSetAtoms(s))
5542                                         self._sets[s] = expanded_set
5543                                         args.append(SetArg(arg=x, set=expanded_set,
5544                                                 root_config=root_config))
5545                                         continue
5546                                 if not is_valid_package_atom(x):
5547                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5548                                                 noiselevel=-1)
5549                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5550                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5551                                         return (0,[])
5552                                 # Don't expand categories or old-style virtuals here unless
5553                                 # necessary. Expansion of old-style virtuals here causes at
5554                                 # least the following problems:
5555                                 #   1) It's more difficult to determine which set(s) an atom
5556                                 #      came from, if any.
5557                                 #   2) It takes away freedom from the resolver to choose other
5558                                 #      possible expansions when necessary.
5559                                 if "/" in x:
5560                                         args.append(AtomArg(arg=x, atom=x,
5561                                                 root_config=root_config))
5562                                         continue
5563                                 expanded_atoms = self._dep_expand(root_config, x)
5564                                 installed_cp_set = set()
5565                                 for atom in expanded_atoms:
5566                                         atom_cp = portage.dep_getkey(atom)
5567                                         if vardb.cp_list(atom_cp):
5568                                                 installed_cp_set.add(atom_cp)
5569                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5570                                         installed_cp = iter(installed_cp_set).next()
5571                                         expanded_atoms = [atom for atom in expanded_atoms \
5572                                                 if portage.dep_getkey(atom) == installed_cp]
5573
5574                                 if len(expanded_atoms) > 1:
5575                                         print
5576                                         print
5577                                         ambiguous_package_name(x, expanded_atoms, root_config,
5578                                                 self.spinner, self.myopts)
5579                                         return False, myfavorites
5580                                 if expanded_atoms:
5581                                         atom = expanded_atoms[0]
5582                                 else:
5583                                         null_atom = insert_category_into_atom(x, "null")
5584                                         null_cp = portage.dep_getkey(null_atom)
5585                                         cat, atom_pn = portage.catsplit(null_cp)
5586                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5587                                         if virts_p:
5588                                                 # Allow the depgraph to choose which virtual.
5589                                                 atom = insert_category_into_atom(x, "virtual")
5590                                         else:
5591                                                 atom = insert_category_into_atom(x, "null")
5592
5593                                 args.append(AtomArg(arg=x, atom=atom,
5594                                         root_config=root_config))
5595
5596                 if lookup_owners:
5597                         relative_paths = []
5598                         search_for_multiple = False
5599                         if len(lookup_owners) > 1:
5600                                 search_for_multiple = True
5601
5602                         for x in lookup_owners:
5603                                 if not search_for_multiple and os.path.isdir(x):
5604                                         search_for_multiple = True
5605                                 relative_paths.append(x[len(myroot):])
5606
5607                         owners = set()
5608                         for pkg, relative_path in \
5609                                 real_vardb._owners.iter_owners(relative_paths):
5610                                 owners.add(pkg.mycpv)
5611                                 if not search_for_multiple:
5612                                         break
5613
5614                         if not owners:
5615                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5616                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5617                                 return 0, []
5618
5619                         for cpv in owners:
5620                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5621                                 if not slot:
5622                                         # portage now masks packages with missing slot, but it's
5623                                         # possible that one was installed by an older version
5624                                         atom = portage.cpv_getkey(cpv)
5625                                 else:
5626                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5627                                 args.append(AtomArg(arg=atom, atom=atom,
5628                                         root_config=root_config))
5629
5630                 if "--update" in self.myopts:
5631                         # In some cases, the greedy slots behavior can pull in a slot that
5632                         # the user would want to uninstall due to it being blocked by a
5633                         # newer version in a different slot. Therefore, it's necessary to
5634                         # detect and discard any that should be uninstalled. Each time
5635                         # that arguments are updated, package selections are repeated in
5636                         # order to ensure consistency with the current arguments:
5637                         #
5638                         #  1) Initialize args
5639                         #  2) Select packages and generate initial greedy atoms
5640                         #  3) Update args with greedy atoms
5641                         #  4) Select packages and generate greedy atoms again, while
5642                         #     accounting for any blockers between selected packages
5643                         #  5) Update args with revised greedy atoms
5644
5645                         self._set_args(args)
5646                         greedy_args = []
5647                         for arg in args:
5648                                 greedy_args.append(arg)
5649                                 if not isinstance(arg, AtomArg):
5650                                         continue
5651                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5652                                         greedy_args.append(
5653                                                 AtomArg(arg=arg.arg, atom=atom,
5654                                                         root_config=arg.root_config))
5655
5656                         self._set_args(greedy_args)
5657                         del greedy_args
5658
5659                         # Revise greedy atoms, accounting for any blockers
5660                         # between selected packages.
5661                         revised_greedy_args = []
5662                         for arg in args:
5663                                 revised_greedy_args.append(arg)
5664                                 if not isinstance(arg, AtomArg):
5665                                         continue
5666                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5667                                         blocker_lookahead=True):
5668                                         revised_greedy_args.append(
5669                                                 AtomArg(arg=arg.arg, atom=atom,
5670                                                         root_config=arg.root_config))
5671                         args = revised_greedy_args
5672                         del revised_greedy_args
5673
5674                 self._set_args(args)
5675
5676                 myfavorites = set(myfavorites)
5677                 for arg in args:
5678                         if isinstance(arg, (AtomArg, PackageArg)):
5679                                 myfavorites.add(arg.atom)
5680                         elif isinstance(arg, SetArg):
5681                                 myfavorites.add(arg.arg)
5682                 myfavorites = list(myfavorites)
5683
5684                 pprovideddict = pkgsettings.pprovideddict
5685                 if debug:
5686                         portage.writemsg("\n", noiselevel=-1)
5687                 # Order needs to be preserved since a feature of --nodeps
5688                 # is to allow the user to force a specific merge order.
5689                 args.reverse()
5690                 while args:
5691                         arg = args.pop()
5692                         for atom in arg.set:
5693                                 self.spinner.update()
5694                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5695                                         root=myroot, parent=arg)
5696                                 atom_cp = portage.dep_getkey(atom)
5697                                 try:
5698                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5699                                         if pprovided and portage.match_from_list(atom, pprovided):
5700                                                 # A provided package has been specified on the command line.
5701                                                 self._pprovided_args.append((arg, atom))
5702                                                 continue
5703                                         if isinstance(arg, PackageArg):
5704                                                 if not self._add_pkg(arg.package, dep) or \
5705                                                         not self._create_graph():
5706                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5707                                                                 "dependencies for %s\n") % arg.arg)
5708                                                         return 0, myfavorites
5709                                                 continue
5710                                         if debug:
5711                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5712                                                         (arg, atom), noiselevel=-1)
5713                                         pkg, existing_node = self._select_package(
5714                                                 myroot, atom, onlydeps=onlydeps)
5715                                         if not pkg:
5716                                                 if not (isinstance(arg, SetArg) and \
5717                                                         arg.name in ("system", "world")):
5718                                                         self._unsatisfied_deps_for_display.append(
5719                                                                 ((myroot, atom), {}))
5720                                                         return 0, myfavorites
5721                                                 self._missing_args.append((arg, atom))
5722                                                 continue
5723                                         if atom_cp != pkg.cp:
5724                                                 # For old-style virtuals, we need to repeat the
5725                                                 # package.provided check against the selected package.
5726                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5727                                                 pprovided = pprovideddict.get(pkg.cp)
5728                                                 if pprovided and \
5729                                                         portage.match_from_list(expanded_atom, pprovided):
5730                                                         # A provided package has been
5731                                                         # specified on the command line.
5732                                                         self._pprovided_args.append((arg, atom))
5733                                                         continue
5734                                         if pkg.installed and "selective" not in self.myparams:
5735                                                 self._unsatisfied_deps_for_display.append(
5736                                                         ((myroot, atom), {}))
5737                                                 # Previous behavior was to bail out in this case, but
5738                                                 # since the dep is satisfied by the installed package,
5739                                                 # it's more friendly to continue building the graph
5740                                                 # and just show a warning message. Therefore, only bail
5741                                                 # out here if the atom is not from either the system or
5742                                                 # world set.
5743                                                 if not (isinstance(arg, SetArg) and \
5744                                                         arg.name in ("system", "world")):
5745                                                         return 0, myfavorites
5746
5747                                         # Add the selected package to the graph as soon as possible
5748                                         # so that later dep_check() calls can use it as feedback
5749                                         # for making more consistent atom selections.
5750                                         if not self._add_pkg(pkg, dep):
5751                                                 if isinstance(arg, SetArg):
5752                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5753                                                                 "dependencies for %s from %s\n") % \
5754                                                                 (atom, arg.arg))
5755                                                 else:
5756                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5757                                                                 "dependencies for %s\n") % atom)
5758                                                 return 0, myfavorites
5759
5760                                 except portage.exception.MissingSignature, e:
5761                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5762                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5763                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5764                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5765                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5766                                         return 0, myfavorites
5767                                 except portage.exception.InvalidSignature, e:
5768                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5769                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5770                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5771                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5772                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5773                                         return 0, myfavorites
5774                                 except SystemExit, e:
5775                                         raise # Needed else can't exit
5776                                 except Exception, e:
5777                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5778                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5779                                         raise
5780
5781                 # Now that the root packages have been added to the graph,
5782                 # process the dependencies.
5783                 if not self._create_graph():
5784                         return 0, myfavorites
5785
5786                 missing = 0
5787                 if "--usepkgonly" in self.myopts:
5788                         for xs in self.digraph.all_nodes():
5789                                 if not isinstance(xs, Package):
5790                                         continue
5791                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5792                                         if missing == 0:
5793                                                 print
5794                                         missing += 1
5795                                         print "Missing binary for:",xs[2]
5796
5797                 try:
5798                         self.altlist()
5799                 except self._unknown_internal_error:
5800                         return False, myfavorites
5801
5802                 # The result is True unless binaries are missing.
5803                 return (not missing, myfavorites)
5804
5805         def _set_args(self, args):
5806                 """
5807                 Create the "args" package set from atoms and packages given as
5808                 arguments. This method can be called multiple times if necessary.
5809                 The package selection cache is automatically invalidated, since
5810                 arguments influence package selections.
5811                 """
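                # Illustrative sketch only (not executed): the call sites above
                # re-run this method whenever the argument list changes, roughly
                #
                #     self._set_args(args)         # initial arguments
                #     self._set_args(greedy_args)  # after greedy slot atoms
                #     self._set_args(args)         # after blocker lookahead
                #
                # so that the "args" set, _set_atoms, _atom_arg_map and the
                # package selection cache stay consistent with each other.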
5812                 args_set = self._sets["args"]
5813                 args_set.clear()
5814                 for arg in args:
5815                         if not isinstance(arg, (AtomArg, PackageArg)):
5816                                 continue
5817                         atom = arg.atom
5818                         if atom in args_set:
5819                                 continue
5820                         args_set.add(atom)
5821
5822                 self._set_atoms.clear()
5823                 self._set_atoms.update(chain(*self._sets.itervalues()))
5824                 atom_arg_map = self._atom_arg_map
5825                 atom_arg_map.clear()
5826                 for arg in args:
5827                         for atom in arg.set:
5828                                 atom_key = (atom, arg.root_config.root)
5829                                 refs = atom_arg_map.get(atom_key)
5830                                 if refs is None:
5831                                         refs = []
5832                                         atom_arg_map[atom_key] = refs
5833                                 if arg not in refs:
5834                                         refs.append(arg)
5835
5836                 # Invalidate the package selection cache, since
5837                 # arguments influence package selections.
5838                 self._highest_pkg_cache.clear()
5839                 for trees in self._filtered_trees.itervalues():
5840                         trees["porttree"].dbapi._clear_cache()
5841
5842         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5843                 """
5844                 Return a list of slot atoms corresponding to installed slots that
5845                 differ from the slot of the highest visible match. When
5846                 blocker_lookahead is True, slot atoms that would trigger a blocker
5847                 conflict are automatically discarded, potentially allowing automatic
5848                 uninstallation of older slots when appropriate.
5849                 """
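                # Hedged example with a hypothetical package (not taken from any
                # particular tree): if the atom is "x11-libs/gtk+", slots "1" and
                # "2" are installed, and the highest visible match lives in slot
                # "2", the expected result is roughly
                #
                #     [Atom("x11-libs/gtk+:1")]
                #
                # and with blocker_lookahead=True that slot atom is dropped again
                # if it would trigger a blocker conflict.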
5850                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5851                 if highest_pkg is None:
5852                         return []
5853                 vardb = root_config.trees["vartree"].dbapi
5854                 slots = set()
5855                 for cpv in vardb.match(atom):
5856                         # don't mix new virtuals with old virtuals
5857                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5858                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5859
5860                 slots.add(highest_pkg.metadata["SLOT"])
5861                 if len(slots) == 1:
5862                         return []
5863                 greedy_pkgs = []
5864                 slots.remove(highest_pkg.metadata["SLOT"])
5865                 while slots:
5866                         slot = slots.pop()
5867                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5868                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5869                         if pkg is not None and \
5870                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5871                                 greedy_pkgs.append(pkg)
5872                 if not greedy_pkgs:
5873                         return []
5874                 if not blocker_lookahead:
5875                         return [pkg.slot_atom for pkg in greedy_pkgs]
5876
5877                 blockers = {}
5878                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5879                 for pkg in greedy_pkgs + [highest_pkg]:
5880                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5881                         try:
5882                                 atoms = self._select_atoms(
5883                                         pkg.root, dep_str, pkg.use.enabled,
5884                                         parent=pkg, strict=True)
5885                         except portage.exception.InvalidDependString:
5886                                 continue
5887                         blocker_atoms = (x for x in atoms if x.blocker)
5888                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5889
5890                 if highest_pkg not in blockers:
5891                         return []
5892
5893                 # filter packages with invalid deps
5894                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5895
5896                 # filter packages that conflict with highest_pkg
5897                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5898                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5899                         blockers[pkg].findAtomForPackage(highest_pkg))]
5900
5901                 if not greedy_pkgs:
5902                         return []
5903
5904                 # If two packages conflict, discard the lower version.
5905                 discard_pkgs = set()
5906                 greedy_pkgs.sort(reverse=True)
5907                 for i in xrange(len(greedy_pkgs) - 1):
5908                         pkg1 = greedy_pkgs[i]
5909                         if pkg1 in discard_pkgs:
5910                                 continue
5911                         for j in xrange(i + 1, len(greedy_pkgs)):
5912                                 pkg2 = greedy_pkgs[j]
5913                                 if pkg2 in discard_pkgs:
5914                                         continue
5915                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5916                                         blockers[pkg2].findAtomForPackage(pkg1):
5917                                         # pkg1 is the higher version (descending sort), so discard pkg2.
5918                                         discard_pkgs.add(pkg2)
5919
5920                 return [pkg.slot_atom for pkg in greedy_pkgs \
5921                         if pkg not in discard_pkgs]
5922
5923         def _select_atoms_from_graph(self, *pargs, **kwargs):
5924                 """
5925                 Prefer atoms matching packages that have already been
5926                 added to the graph or those that are installed and have
5927                 not been scheduled for replacement.
5928                 """
5929                 kwargs["trees"] = self._graph_trees
5930                 return self._select_atoms_highest_available(*pargs, **kwargs)
5931
5932         def _select_atoms_highest_available(self, root, depstring,
5933                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5934                 """This will raise InvalidDependString if necessary. If trees is
5935                 None then self._filtered_trees is used."""
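                # Hedged sketch of the expected behavior (hypothetical
                # depstring): for something like
                #
                #     "|| ( app-editors/nano app-editors/vim ) sys-libs/zlib"
                #
                # a successful dep_check() call below yields a flat list of the
                # chosen atoms, e.g. ["app-editors/nano", "sys-libs/zlib"],
                # while a malformed depstring raises InvalidDependString.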
5936                 pkgsettings = self.pkgsettings[root]
5937                 if trees is None:
5938                         trees = self._filtered_trees
5939                 if not getattr(priority, "buildtime", False):
5940                         # The parent should only be passed to dep_check() for buildtime
5941                         # dependencies since that's the only case when it's appropriate
5942                         # to trigger the circular dependency avoidance code which uses it.
5943                         # It's important not to trigger the same circular dependency
5944                         # avoidance code for runtime dependencies since it's not needed
5945                         # and it can promote an incorrect package choice.
5946                         parent = None
5947                 if True:
5948                         try:
5949                                 if parent is not None:
5950                                         trees[root]["parent"] = parent
5951                                 if not strict:
5952                                         portage.dep._dep_check_strict = False
5953                                 mycheck = portage.dep_check(depstring, None,
5954                                         pkgsettings, myuse=myuse,
5955                                         myroot=root, trees=trees)
5956                         finally:
5957                                 if parent is not None:
5958                                         trees[root].pop("parent")
5959                                 portage.dep._dep_check_strict = True
5960                         if not mycheck[0]:
5961                                 raise portage.exception.InvalidDependString(mycheck[1])
5962                         selected_atoms = mycheck[1]
5963                 return selected_atoms
5964
5965         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
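                """
                Print an explanation of why no package could satisfy the given
                atom: masked candidates, missing or misconfigured USE flags, and
                the chain of parent dependencies that pulled the atom in.
                """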
5966                 atom = portage.dep.Atom(atom)
5967                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5968                 atom_without_use = atom
5969                 if atom.use:
5970                         atom_without_use = portage.dep.remove_slot(atom)
5971                         if atom.slot:
5972                                 atom_without_use += ":" + atom.slot
5973                         atom_without_use = portage.dep.Atom(atom_without_use)
5974                 xinfo = '"%s"' % atom
5975                 if arg:
5976                         xinfo='"%s"' % arg
5977                 # Discard null/ from failed cpv_expand category expansion.
5978                 xinfo = xinfo.replace("null/", "")
5979                 masked_packages = []
5980                 missing_use = []
5981                 masked_pkg_instances = set()
5982                 missing_licenses = []
5983                 have_eapi_mask = False
5984                 pkgsettings = self.pkgsettings[root]
5985                 implicit_iuse = pkgsettings._get_implicit_iuse()
5986                 root_config = self.roots[root]
5987                 portdb = self.roots[root].trees["porttree"].dbapi
5988                 dbs = self._filtered_trees[root]["dbs"]
5989                 for db, pkg_type, built, installed, db_keys in dbs:
5990                         if installed:
5991                                 continue
5992                         match = db.match
5993                         if hasattr(db, "xmatch"):
5994                                 cpv_list = db.xmatch("match-all", atom_without_use)
5995                         else:
5996                                 cpv_list = db.match(atom_without_use)
5997                         # descending order
5998                         cpv_list.reverse()
5999                         for cpv in cpv_list:
6000                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6001                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6002                                 if metadata is not None:
6003                                         pkg = Package(built=built, cpv=cpv,
6004                                                 installed=installed, metadata=metadata,
6005                                                 root_config=root_config)
6006                                         if pkg.cp != atom.cp:
6007                                                 # A cpv can be returned from dbapi.match() as an
6008                                                 # old-style virtual match even in cases when the
6009                                                 # package does not actually PROVIDE the virtual.
6010                                                 # Filter out any such false matches here.
6011                                                 if not atom_set.findAtomForPackage(pkg):
6012                                                         continue
6013                                         if mreasons:
6014                                                 masked_pkg_instances.add(pkg)
6015                                         if atom.use:
6016                                                 missing_use.append(pkg)
6017                                                 if not mreasons:
6018                                                         continue
6019                                 masked_packages.append(
6020                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6021
6022                 missing_use_reasons = []
6023                 missing_iuse_reasons = []
6024                 for pkg in missing_use:
6025                         use = pkg.use.enabled
6026                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6027                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6028                         missing_iuse = []
6029                         for x in atom.use.required:
6030                                 if iuse_re.match(x) is None:
6031                                         missing_iuse.append(x)
6032                         mreasons = []
6033                         if missing_iuse:
6034                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6035                                 missing_iuse_reasons.append((pkg, mreasons))
6036                         else:
6037                                 need_enable = sorted(atom.use.enabled.difference(use))
6038                                 need_disable = sorted(atom.use.disabled.intersection(use))
6039                                 if need_enable or need_disable:
6040                                         changes = []
6041                                         changes.extend(colorize("red", "+" + x) \
6042                                                 for x in need_enable)
6043                                         changes.extend(colorize("blue", "-" + x) \
6044                                                 for x in need_disable)
6045                                         mreasons.append("Change USE: %s" % " ".join(changes))
6046                                         missing_use_reasons.append((pkg, mreasons))
6047
6048                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6049                         in missing_use_reasons if pkg not in masked_pkg_instances]
6050
6051                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6053
6054                 show_missing_use = False
6055                 if unmasked_use_reasons:
6056                         # Only show the latest version.
6057                         show_missing_use = unmasked_use_reasons[:1]
6058                 elif unmasked_iuse_reasons:
6059                         if missing_use_reasons:
6060                                 # All packages with required IUSE are masked,
6061                                 # so display a normal masking message.
6062                                 pass
6063                         else:
6064                                 show_missing_use = unmasked_iuse_reasons
6065
6066                 if show_missing_use:
6067                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6068                         print "!!! One of the following packages is required to complete your request:"
6069                         for pkg, mreasons in show_missing_use:
6070                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6071
6072                 elif masked_packages:
6073                         print "\n!!! " + \
6074                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6075                                 colorize("INFORM", xinfo) + \
6076                                 colorize("BAD", " have been masked.")
6077                         print "!!! One of the following masked packages is required to complete your request:"
6078                         have_eapi_mask = show_masked_packages(masked_packages)
6079                         if have_eapi_mask:
6080                                 print
6081                                 msg = ("The current version of portage supports " + \
6082                                         "EAPI '%s'. You must upgrade to a newer version" + \
6083                                         " of portage before EAPI masked packages can" + \
6084                                         " be installed.") % portage.const.EAPI
6085                                 from textwrap import wrap
6086                                 for line in wrap(msg, 75):
6087                                         print line
6088                         print
6089                         show_mask_docs()
6090                 else:
6091                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6092
6093                 # Show parent nodes and the argument that pulled them in.
6094                 traversed_nodes = set()
6095                 node = myparent
6096                 msg = []
6097                 while node is not None:
6098                         traversed_nodes.add(node)
6099                         msg.append('(dependency required by "%s" [%s])' % \
6100                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6101                         # When traversing to parents, prefer arguments over packages
6102                         # since arguments are root nodes. Never traverse the same
6103                         # package twice, in order to prevent an infinite loop.
6104                         selected_parent = None
6105                         for parent in self.digraph.parent_nodes(node):
6106                                 if isinstance(parent, DependencyArg):
6107                                         msg.append('(dependency required by "%s" [argument])' % \
6108                                                 (colorize('INFORM', str(parent))))
6109                                         selected_parent = None
6110                                         break
6111                                 if parent not in traversed_nodes:
6112                                         selected_parent = parent
6113                         node = selected_parent
6114                 for line in msg:
6115                         print line
6116
6117                 print
6118
6119         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
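                """
                Memoizing wrapper around _select_pkg_highest_available_imp().
                Results are cached by (root, atom, onlydeps), updated when a
                cached package has since been added to the graph, and visible
                matches are injected into root_config.visible_pkgs.
                """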
6120                 cache_key = (root, atom, onlydeps)
6121                 ret = self._highest_pkg_cache.get(cache_key)
6122                 if ret is not None:
6123                         pkg, existing = ret
6124                         if pkg and not existing:
6125                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6126                                 if existing and existing == pkg:
6127                                         # Update the cache to reflect that the
6128                                         # package has been added to the graph.
6129                                         ret = pkg, pkg
6130                                         self._highest_pkg_cache[cache_key] = ret
6131                         return ret
6132                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6133                 self._highest_pkg_cache[cache_key] = ret
6134                 pkg, existing = ret
6135                 if pkg is not None:
6136                         settings = pkg.root_config.settings
6137                         if visible(settings, pkg) and not (pkg.installed and \
6138                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6139                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6140                 return ret
6141
6142         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
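                """
                Uncached package selection: scan the filtered dbs in order of
                type preference, apply visibility, USE and reinstall checks,
                and return (package, existing_graph_node), or (None, None) when
                nothing acceptable matches the atom.
                """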
6143                 root_config = self.roots[root]
6144                 pkgsettings = self.pkgsettings[root]
6145                 dbs = self._filtered_trees[root]["dbs"]
6146                 vardb = self.roots[root].trees["vartree"].dbapi
6147                 portdb = self.roots[root].trees["porttree"].dbapi
6148                 # List of acceptable packages, ordered by type preference.
6149                 matched_packages = []
6150                 highest_version = None
6151                 if not isinstance(atom, portage.dep.Atom):
6152                         atom = portage.dep.Atom(atom)
6153                 atom_cp = atom.cp
6154                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6155                 existing_node = None
6156                 myeb = None
6157                 usepkgonly = "--usepkgonly" in self.myopts
6158                 empty = "empty" in self.myparams
6159                 selective = "selective" in self.myparams
6160                 reinstall = False
6161                 noreplace = "--noreplace" in self.myopts
6162                 # Behavior of the "selective" parameter depends on
6163                 # whether or not a package matches an argument atom.
6164                 # If an installed package provides an old-style
6165                 # virtual that is no longer provided by an available
6166                 # package, the installed package may match an argument
6167                 # atom even though none of the available packages do.
6168                 # Therefore, "selective" logic does not consider
6169                 # whether or not an installed package matches an
6170                 # argument atom. It only considers whether or not
6171                 # available packages match argument atoms, which is
6172                 # represented by the found_available_arg flag.
6173                 found_available_arg = False
6174                 for find_existing_node in True, False:
6175                         if existing_node:
6176                                 break
6177                         for db, pkg_type, built, installed, db_keys in dbs:
6178                                 if existing_node:
6179                                         break
6180                                 if installed and not find_existing_node:
6181                                         want_reinstall = reinstall or empty or \
6182                                                 (found_available_arg and not selective)
6183                                         if want_reinstall and matched_packages:
6184                                                 continue
6185                                 if hasattr(db, "xmatch"):
6186                                         cpv_list = db.xmatch("match-all", atom)
6187                                 else:
6188                                         cpv_list = db.match(atom)
6189
6190                                 # USE=multislot can make an installed package appear as if
6191                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6192                                 # won't do any good as long as USE=multislot is enabled since
6193                                 # the newly built package still won't have the expected slot.
6194                                 # Therefore, assume that such SLOT dependencies are already
6195                                 # satisfied rather than forcing a rebuild.
6196                                 if installed and not cpv_list and atom.slot:
6197                                         for cpv in db.match(atom.cp):
6198                                                 slot_available = False
6199                                                 for other_db, other_type, other_built, \
6200                                                         other_installed, other_keys in dbs:
6201                                                         try:
6202                                                                 if atom.slot == \
6203                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6204                                                                         slot_available = True
6205                                                                         break
6206                                                         except KeyError:
6207                                                                 pass
6208                                                 if not slot_available:
6209                                                         continue
6210                                                 inst_pkg = self._pkg(cpv, "installed",
6211                                                         root_config, installed=installed)
6212                                                 # Remove the slot from the atom and verify that
6213                                                 # the package matches the resulting atom.
6214                                                 atom_without_slot = portage.dep.remove_slot(atom)
6215                                                 if atom.use:
6216                                                         atom_without_slot += str(atom.use)
6217                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6218                                                 if portage.match_from_list(
6219                                                         atom_without_slot, [inst_pkg]):
6220                                                         cpv_list = [inst_pkg.cpv]
6221                                                 break
6222
6223                                 if not cpv_list:
6224                                         continue
6225                                 pkg_status = "merge"
6226                                 if installed or onlydeps:
6227                                         pkg_status = "nomerge"
6228                                 # descending order
6229                                 cpv_list.reverse()
6230                                 for cpv in cpv_list:
6231                                         # Make --noreplace take precedence over --newuse.
6232                                         if not installed and noreplace and \
6233                                                 cpv in vardb.match(atom):
6234                                                 # If the installed version is masked, it may
6235                                                 # be necessary to look at lower versions,
6236                                                 # in case there is a visible downgrade.
6237                                                 continue
6238                                         reinstall_for_flags = None
6239                                         cache_key = (pkg_type, root, cpv, pkg_status)
6240                                         calculated_use = True
6241                                         pkg = self._pkg_cache.get(cache_key)
6242                                         if pkg is None:
6243                                                 calculated_use = False
6244                                                 try:
6245                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6246                                                 except KeyError:
6247                                                         continue
6248                                                 pkg = Package(built=built, cpv=cpv,
6249                                                         installed=installed, metadata=metadata,
6250                                                         onlydeps=onlydeps, root_config=root_config,
6251                                                         type_name=pkg_type)
6252                                                 metadata = pkg.metadata
6253                                                 if not built:
6254                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6255                                                 if not built and ("?" in metadata["LICENSE"] or \
6256                                                         "?" in metadata["PROVIDE"]):
6257                                                         # This is avoided whenever possible because
6258                                                         # it's expensive. It only needs to be done here
6259                                                         # if it has an effect on visibility.
6260                                                         pkgsettings.setcpv(pkg)
6261                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6262                                                         calculated_use = True
6263                                                 self._pkg_cache[pkg] = pkg
6264
6265                                         if not installed or (built and matched_packages):
6266                                                 # Only enforce visibility on installed packages
6267                                                 # if there is at least one other visible package
6268                                                 # available. By filtering installed masked packages
6269                                                 # here, packages that have been masked since they
6270                                                 # were installed can be automatically downgraded
6271                                                 # to an unmasked version.
6272                                                 try:
6273                                                         if not visible(pkgsettings, pkg):
6274                                                                 continue
6275                                                 except portage.exception.InvalidDependString:
6276                                                         if not installed:
6277                                                                 continue
6278
6279                                                 # Enable upgrade or downgrade to a version
6280                                                 # with visible KEYWORDS when the installed
6281                                                 # version is masked by KEYWORDS, but never
6282                                                 # reinstall the same exact version only due
6283                                                 # to a KEYWORDS mask.
6284                                                 if built and matched_packages:
6285
6286                                                         different_version = None
6287                                                         for avail_pkg in matched_packages:
6288                                                                 if not portage.dep.cpvequal(
6289                                                                         pkg.cpv, avail_pkg.cpv):
6290                                                                         different_version = avail_pkg
6291                                                                         break
6292                                                         if different_version is not None:
6293
6294                                                                 if installed and \
6295                                                                         pkgsettings._getMissingKeywords(
6296                                                                         pkg.cpv, pkg.metadata):
6297                                                                         continue
6298
6299                                                                 # If the ebuild no longer exists or its
6300                                                                 # keywords have been dropped, reject built
6301                                                                 # instances (installed or binary).
6302                                                                 # If --usepkgonly is enabled, assume that
6303                                                                 # the ebuild status should be ignored.
6304                                                                 if not usepkgonly:
6305                                                                         try:
6306                                                                                 pkg_eb = self._pkg(
6307                                                                                         pkg.cpv, "ebuild", root_config)
6308                                                                         except portage.exception.PackageNotFound:
6309                                                                                 continue
6310                                                                         else:
6311                                                                                 if not visible(pkgsettings, pkg_eb):
6312                                                                                         continue
6313
6314                                         if not pkg.built and not calculated_use:
6315                                                 # This is avoided whenever possible because
6316                                                 # it's expensive.
6317                                                 pkgsettings.setcpv(pkg)
6318                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6319
6320                                         if pkg.cp != atom.cp:
6321                                                 # A cpv can be returned from dbapi.match() as an
6322                                                 # old-style virtual match even in cases when the
6323                                                 # package does not actually PROVIDE the virtual.
6324                                                 # Filter out any such false matches here.
6325                                                 if not atom_set.findAtomForPackage(pkg):
6326                                                         continue
6327
6328                                         myarg = None
6329                                         if root == self.target_root:
6330                                                 try:
6331                                                         # Ebuild USE must have been calculated prior
6332                                                         # to this point, in case atoms have USE deps.
6333                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6334                                                 except StopIteration:
6335                                                         pass
6336                                                 except portage.exception.InvalidDependString:
6337                                                         if not installed:
6338                                                                 # masked by corruption
6339                                                                 continue
6340                                         if not installed and myarg:
6341                                                 found_available_arg = True
6342
6343                                         if atom.use and not pkg.built:
6344                                                 use = pkg.use.enabled
6345                                                 if atom.use.enabled.difference(use):
6346                                                         continue
6347                                                 if atom.use.disabled.intersection(use):
6348                                                         continue
6349                                         if pkg.cp == atom_cp:
6350                                                 if highest_version is None:
6351                                                         highest_version = pkg
6352                                                 elif pkg > highest_version:
6353                                                         highest_version = pkg
6354                                         # At this point, we've found the highest visible
6355                                         # match from the current repo. Any lower versions
6356                                         # from this repo are ignored, so the loop
6357                                         # will always end with a break statement below
6358                                         # this point.
6359                                         if find_existing_node:
6360                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6361                                                 if not e_pkg:
6362                                                         break
6363                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6364                                                         if highest_version and \
6365                                                                 e_pkg.cp == atom_cp and \
6366                                                                 e_pkg < highest_version and \
6367                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6368                                                                 # There is a higher version available in a
6369                                                                 # different slot, so this existing node is
6370                                                                 # irrelevant.
6371                                                                 pass
6372                                                         else:
6373                                                                 matched_packages.append(e_pkg)
6374                                                                 existing_node = e_pkg
6375                                                 break
6376                                         # Compare built package to current config and
6377                                         # reject the built package if necessary.
6378                                         if built and not installed and \
6379                                                 ("--newuse" in self.myopts or \
6380                                                 "--reinstall" in self.myopts):
6381                                                 iuses = pkg.iuse.all
6382                                                 old_use = pkg.use.enabled
6383                                                 if myeb:
6384                                                         pkgsettings.setcpv(myeb)
6385                                                 else:
6386                                                         pkgsettings.setcpv(pkg)
6387                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6388                                                 forced_flags = set()
6389                                                 forced_flags.update(pkgsettings.useforce)
6390                                                 forced_flags.update(pkgsettings.usemask)
6391                                                 cur_iuse = iuses
6392                                                 if myeb and not usepkgonly:
6393                                                         cur_iuse = myeb.iuse.all
6394                                                 if self._reinstall_for_flags(forced_flags,
6395                                                         old_use, iuses,
6396                                                         now_use, cur_iuse):
6397                                                         break
6398                                         # Compare current config to installed package
6399                                         # and do not reinstall if possible.
6400                                         if not installed and \
6401                                                 ("--newuse" in self.myopts or \
6402                                                 "--reinstall" in self.myopts) and \
6403                                                 cpv in vardb.match(atom):
6404                                                 pkgsettings.setcpv(pkg)
6405                                                 forced_flags = set()
6406                                                 forced_flags.update(pkgsettings.useforce)
6407                                                 forced_flags.update(pkgsettings.usemask)
6408                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6409                                                 old_iuse = set(filter_iuse_defaults(
6410                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6411                                                 cur_use = pkg.use.enabled
6412                                                 cur_iuse = pkg.iuse.all
6413                                                 reinstall_for_flags = \
6414                                                         self._reinstall_for_flags(
6415                                                         forced_flags, old_use, old_iuse,
6416                                                         cur_use, cur_iuse)
6417                                                 if reinstall_for_flags:
6418                                                         reinstall = True
6419                                         if not built:
6420                                                 myeb = pkg
6421                                         matched_packages.append(pkg)
6422                                         if reinstall_for_flags:
6423                                                 self._reinstall_nodes[pkg] = \
6424                                                         reinstall_for_flags
6425                                         break
6426
6427                 if not matched_packages:
6428                         return None, None
6429
6430                 if "--debug" in self.myopts:
6431                         for pkg in matched_packages:
6432                                 portage.writemsg("%s %s\n" % \
6433                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6434
6435                 # Filter out any old-style virtual matches if they are
6436                 # mixed with new-style virtual matches.
6437                 cp = portage.dep_getkey(atom)
6438                 if len(matched_packages) > 1 and \
6439                         "virtual" == portage.catsplit(cp)[0]:
6440                         for pkg in matched_packages:
6441                                 if pkg.cp != cp:
6442                                         continue
6443                                 # Got a new-style virtual, so filter
6444                                 # out any old-style virtuals.
6445                                 matched_packages = [pkg for pkg in matched_packages \
6446                                         if pkg.cp == cp]
6447                                 break
6448
6449                 if len(matched_packages) > 1:
6450                         bestmatch = portage.best(
6451                                 [pkg.cpv for pkg in matched_packages])
6452                         matched_packages = [pkg for pkg in matched_packages \
6453                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6454
6455                 # ordered by type preference ("ebuild" type is the last resort)
6456                 return matched_packages[-1], existing_node
6457
6458         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6459                 """
6460                 Select packages that have already been added to the graph or
6461                 those that are installed and have not been scheduled for
6462                 replacement.
6463                 """
6464                 graph_db = self._graph_trees[root]["porttree"].dbapi
6465                 matches = graph_db.match_pkgs(atom)
6466                 if not matches:
6467                         return None, None
6468                 pkg = matches[-1] # highest match
6469                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6470                 return pkg, in_graph
6471
6472         def _complete_graph(self):
6473                 """
6474                 Add any deep dependencies of required sets (args, system, world) that
6475                 have not been pulled into the graph yet. This ensures that the graph
6476                 is consistent such that initially satisfied deep dependencies are not
6477                 broken in the new graph. Initially unsatisfied dependencies are
6478                 irrelevant since we only want to avoid breaking dependencies that are
6479                 initially satisfied.
6480
6481                 Since this method can consume enough time to disturb users, it is
6482                 currently only enabled by the --complete-graph option.
6483                 """
6484                 if "--buildpkgonly" in self.myopts or \
6485                         "recurse" not in self.myparams:
6486                         return 1
6487
6488                 if "complete" not in self.myparams:
6489                         # Skip this to avoid consuming enough time to disturb users.
6490                         return 1
6491
6492                 # Put the depgraph into a mode that causes it to only
6493                 # select packages that have already been added to the
6494                 # graph or those that are installed and have not been
6495                 # scheduled for replacement. Also, toggle the "deep"
6496                 # parameter so that all dependencies are traversed and
6497                 # accounted for.
6498                 self._select_atoms = self._select_atoms_from_graph
6499                 self._select_package = self._select_pkg_from_graph
6500                 already_deep = "deep" in self.myparams
6501                 if not already_deep:
6502                         self.myparams.add("deep")
6503
6504                 for root in self.roots:
6505                         required_set_names = self._required_set_names.copy()
6506                         if root == self.target_root and \
6507                                 (already_deep or "empty" in self.myparams):
6508                                 required_set_names.difference_update(self._sets)
6509                         if not required_set_names and not self._ignored_deps:
6510                                 continue
6511                         root_config = self.roots[root]
6512                         setconfig = root_config.setconfig
6513                         args = []
6514                         # Reuse existing SetArg instances when available.
6515                         for arg in self.digraph.root_nodes():
6516                                 if not isinstance(arg, SetArg):
6517                                         continue
6518                                 if arg.root_config != root_config:
6519                                         continue
6520                                 if arg.name in required_set_names:
6521                                         args.append(arg)
6522                                         required_set_names.remove(arg.name)
6523                         # Create new SetArg instances only when necessary.
6524                         for s in required_set_names:
6525                                 expanded_set = InternalPackageSet(
6526                                         initial_atoms=setconfig.getSetAtoms(s))
6527                                 atom = SETPREFIX + s
6528                                 args.append(SetArg(arg=atom, set=expanded_set,
6529                                         root_config=root_config))
6530                         vardb = root_config.trees["vartree"].dbapi
6531                         for arg in args:
6532                                 for atom in arg.set:
6533                                         self._dep_stack.append(
6534                                                 Dependency(atom=atom, root=root, parent=arg))
6535                         if self._ignored_deps:
6536                                 self._dep_stack.extend(self._ignored_deps)
6537                                 self._ignored_deps = []
6538                         if not self._create_graph(allow_unsatisfied=True):
6539                                 return 0
6540                         # Check the unsatisfied deps to see if any initially satisfied deps
6541                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6542                         # deps are irrelevant since we only want to avoid breaking deps
6543                         # that are initially satisfied.
6544                         while self._unsatisfied_deps:
6545                                 dep = self._unsatisfied_deps.pop()
6546                                 matches = vardb.match_pkgs(dep.atom)
6547                                 if not matches:
6548                                         self._initially_unsatisfied_deps.append(dep)
6549                                         continue
6550                                 # A scheduled installation broke a deep dependency.
6551                                 # Add the installed package to the graph so that it
6552                                 # will be appropriately reported as a slot collision
6553                                 # (possibly solvable via backtracking).
6554                                 pkg = matches[-1] # highest match
6555                                 if not self._add_pkg(pkg, dep):
6556                                         return 0
6557                                 if not self._create_graph(allow_unsatisfied=True):
6558                                         return 0
6559                 return 1
6560
6561         def _pkg(self, cpv, type_name, root_config, installed=False):
6562                 """
6563                 Get a package instance from the cache, or create a new
6564                 one if necessary. Raises PackageNotFound if aux_get
6565                 fails for some reason (package does not exist or is
6566                 corrupt).
6567                 """
6568                 operation = "merge"
6569                 if installed:
6570                         operation = "nomerge"
6571                 pkg = self._pkg_cache.get(
6572                         (type_name, root_config.root, cpv, operation))
6573                 if pkg is None:
6574                         tree_type = self.pkg_tree_map[type_name]
6575                         db = root_config.trees[tree_type].dbapi
6576                         db_keys = list(self._trees_orig[root_config.root][
6577                                 tree_type].dbapi._aux_cache_keys)
6578                         try:
6579                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6580                         except KeyError:
6581                                 raise portage.exception.PackageNotFound(cpv)
6582                         pkg = Package(cpv=cpv, metadata=metadata,
6583                                 root_config=root_config, installed=installed)
6584                         if type_name == "ebuild":
6585                                 settings = self.pkgsettings[root_config.root]
6586                                 settings.setcpv(pkg)
6587                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6588                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6589                         self._pkg_cache[pkg] = pkg
6590                 return pkg
6591
6592         def validate_blockers(self):
6593                 """Remove any blockers from the digraph that do not match any of the
6594                 packages within the graph.  If necessary, create hard deps to ensure
6595                 correct merge order such that mutually blocking packages are never
6596                 installed simultaneously."""
6597
6598                 if "--buildpkgonly" in self.myopts or \
6599                         "--nodeps" in self.myopts:
6600                         return True
6601
6602                 #if "deep" in self.myparams:
6603                 if True:
6604                         # Pull in blockers from all installed packages that haven't already
6605                         # been pulled into the depgraph. This incurs a performance
6606                         # penalty due to all the additional dep_check calls that
6607                         # are required.
6608
6609                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6610                         for myroot in self.trees:
6611                                 vardb = self.trees[myroot]["vartree"].dbapi
6612                                 portdb = self.trees[myroot]["porttree"].dbapi
6613                                 pkgsettings = self.pkgsettings[myroot]
6614                                 final_db = self.mydbapi[myroot]
6615
6616                                 blocker_cache = BlockerCache(myroot, vardb)
6617                                 stale_cache = set(blocker_cache)
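                                     # Any cpv still left in stale_cache after the loop below
                                     # no longer corresponds to an installed package, so its
                                     # entry is pruned from the blocker cache once the loop
                                     # completes.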
6618                                 for pkg in vardb:
6619                                         cpv = pkg.cpv
6620                                         stale_cache.discard(cpv)
6621                                         pkg_in_graph = self.digraph.contains(pkg)
6622
6623                                         # Check for masked installed packages. Only warn about
6624                                         # packages that are in the graph in order to avoid warning
6625                                         # about those that will be automatically uninstalled during
6626                                         # the merge process or by --depclean.
6627                                         if pkg in final_db:
6628                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6629                                                         self._masked_installed.add(pkg)
6630
6631                                         blocker_atoms = None
6632                                         blockers = None
6633                                         if pkg_in_graph:
6634                                                 blockers = []
6635                                                 try:
6636                                                         blockers.extend(
6637                                                                 self._blocker_parents.child_nodes(pkg))
6638                                                 except KeyError:
6639                                                         pass
6640                                                 try:
6641                                                         blockers.extend(
6642                                                                 self._irrelevant_blockers.child_nodes(pkg))
6643                                                 except KeyError:
6644                                                         pass
6645                                         if blockers is not None:
6646                                                 blockers = set(str(blocker.atom) \
6647                                                         for blocker in blockers)
6648
6649                                         # If this node has any blockers, create a "nomerge"
6650                                         # node for it so that they can be enforced.
6651                                         self.spinner.update()
6652                                         blocker_data = blocker_cache.get(cpv)
6653                                         if blocker_data is not None and \
6654                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6655                                                 blocker_data = None
6656
6657                                         # If blocker data from the graph is available, use
6658                                         # it to validate the cache and update the cache if
6659                                         # it seems invalid.
6660                                         if blocker_data is not None and \
6661                                                 blockers is not None:
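                                                     # An empty symmetric difference means the
                                                     # cached atoms match the blockers recorded
                                                     # in the graph, so the cached entry is
                                                     # still valid and can be reused as-is.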
6662                                                 if not blockers.symmetric_difference(
6663                                                         blocker_data.atoms):
6664                                                         continue
6665                                                 blocker_data = None
6666
6667                                         if blocker_data is None and \
6668                                                 blockers is not None:
6669                                                 # Re-use the blockers from the graph.
6670                                                 blocker_atoms = sorted(blockers)
6671                                                 counter = long(pkg.metadata["COUNTER"])
6672                                                 blocker_data = \
6673                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6674                                                 blocker_cache[pkg.cpv] = blocker_data
6675                                                 continue
6676
6677                                         if blocker_data:
6678                                                 blocker_atoms = blocker_data.atoms
6679                                         else:
6680                                                 # Use aux_get() to trigger FakeVartree global
6681                                                 # updates on *DEPEND when appropriate.
6682                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6683                                                 # It is crucial to pass in final_db here in order to
6684                                                 # optimize dep_check calls by eliminating atoms via
6685                                                 # dep_wordreduce and dep_eval calls.
6686                                                 try:
6687                                                         portage.dep._dep_check_strict = False
6688                                                         try:
6689                                                                 success, atoms = portage.dep_check(depstr,
6690                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6691                                                                         trees=self._graph_trees, myroot=myroot)
6692                                                         except Exception, e:
6693                                                                 if isinstance(e, SystemExit):
6694                                                                         raise
6695                                                                 # This is helpful, for example, if a ValueError
6696                                                                 # is thrown from cpv_expand due to multiple
6697                                                                 # matches (this can happen if an atom lacks a
6698                                                                 # category).
6699                                                                 show_invalid_depstring_notice(
6700                                                                         pkg, depstr, str(e))
6701                                                                 del e
6702                                                                 raise
6703                                                 finally:
6704                                                         portage.dep._dep_check_strict = True
6705                                                 if not success:
6706                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6707                                                         if replacement_pkg and \
6708                                                                 replacement_pkg[0].operation == "merge":
6709                                                                 # This package is being replaced anyway, so
6710                                                                 # ignore invalid dependencies so as not to
6711                                                                 # annoy the user too much (otherwise they'd be
6712                                                                 # forced to manually unmerge it first).
6713                                                                 continue
6714                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6715                                                         return False
6716                                                 blocker_atoms = [myatom for myatom in atoms \
6717                                                         if myatom.startswith("!")]
6718                                                 blocker_atoms.sort()
6719                                                 counter = long(pkg.metadata["COUNTER"])
6720                                                 blocker_cache[cpv] = \
6721                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6722                                         if blocker_atoms:
6723                                                 try:
6724                                                         for atom in blocker_atoms:
6725                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6726                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6727                                                                 self._blocker_parents.add(blocker, pkg)
6728                                                 except portage.exception.InvalidAtom, e:
6729                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6730                                                         show_invalid_depstring_notice(
6731                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6732                                                         return False
6733                                 for cpv in stale_cache:
6734                                         del blocker_cache[cpv]
6735                                 blocker_cache.flush()
6736                                 del blocker_cache
6737
6738                 # Discard any "uninstall" tasks scheduled by previous calls
6739                 # to this method, since those tasks may not make sense given
6740                 # the current graph state.
6741                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6742                 if previous_uninstall_tasks:
6743                         self._blocker_uninstalls = digraph()
6744                         self.digraph.difference_update(previous_uninstall_tasks)
6745
6746                 for blocker in self._blocker_parents.leaf_nodes():
6747                         self.spinner.update()
6748                         root_config = self.roots[blocker.root]
6749                         virtuals = root_config.settings.getvirtuals()
6750                         myroot = blocker.root
6751                         initial_db = self.trees[myroot]["vartree"].dbapi
6752                         final_db = self.mydbapi[myroot]
6753
6754                         provider_virtual = False
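                             # An old-style virtual with no new-style replacement has to be
                             # matched through its configured providers, so the blocker atom
                             # is expanded below into one atom per provider.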
6755                         if blocker.cp in virtuals and \
6756                                 not self._have_new_virt(blocker.root, blocker.cp):
6757                                 provider_virtual = True
6758
6759                         # Use this to check PROVIDE for each matched package
6760                         # when necessary.
6761                         atom_set = InternalPackageSet(
6762                                 initial_atoms=[blocker.atom])
6763
6764                         if provider_virtual:
6765                                 atoms = []
6766                                 for provider_entry in virtuals[blocker.cp]:
6767                                         provider_cp = \
6768                                                 portage.dep_getkey(provider_entry)
6769                                         atoms.append(blocker.atom.replace(
6770                                                 blocker.cp, provider_cp))
6771                         else:
6772                                 atoms = [blocker.atom]
6773
6774                         blocked_initial = set()
6775                         for atom in atoms:
6776                                 for pkg in initial_db.match_pkgs(atom):
6777                                         if atom_set.findAtomForPackage(pkg):
6778                                                 blocked_initial.add(pkg)
6779
6780                         blocked_final = set()
6781                         for atom in atoms:
6782                                 for pkg in final_db.match_pkgs(atom):
6783                                         if atom_set.findAtomForPackage(pkg):
6784                                                 blocked_final.add(pkg)
6785
6786                         if not blocked_initial and not blocked_final:
6787                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6788                                 self._blocker_parents.remove(blocker)
6789                                 # Discard any parents that don't have any more blockers.
6790                                 for pkg in parent_pkgs:
6791                                         self._irrelevant_blockers.add(blocker, pkg)
6792                                         if not self._blocker_parents.child_nodes(pkg):
6793                                                 self._blocker_parents.remove(pkg)
6794                                 continue
6795                         for parent in self._blocker_parents.parent_nodes(blocker):
6796                                 unresolved_blocks = False
6797                                 depends_on_order = set()
6798                                 for pkg in blocked_initial:
6799                                         if pkg.slot_atom == parent.slot_atom:
6800                                                 # TODO: Support blocks within slots in cases where it
6801                                                 # might make sense.  For example, a new version might
6802                                                 # require that the old version be uninstalled at build
6803                                                 # time.
6804                                                 continue
6805                                         if parent.installed:
6806                                                 # Two currently installed packages conflict with
6807                                                 # each other. Ignore this case since the damage
6808                                                 # is already done, and displaying it like a normal
6809                                                 # blocker would only confuse users.
6810                                                 continue
6811
6812                                         self._blocked_pkgs.add(pkg, blocker)
6813
6814                                         if parent.operation == "merge":
6815                                                 # Maybe the blocked package can be replaced or simply
6816                                                 # unmerged to resolve this block.
6817                                                 depends_on_order.add((pkg, parent))
6818                                                 continue
6819                                         # None of the above blocker resolution techniques apply,
6820                                         # so apparently this one is unresolvable.
6821                                         unresolved_blocks = True
6822                                 for pkg in blocked_final:
6823                                         if pkg.slot_atom == parent.slot_atom:
6824                                                 # TODO: Support blocks within slots.
6825                                                 continue
6826                                         if parent.operation == "nomerge" and \
6827                                                 pkg.operation == "nomerge":
6828                                                 # This blocker will be handled the next time that a
6829                                                 # merge of either package is triggered.
6830                                                 continue
6831
6832                                         self._blocked_pkgs.add(pkg, blocker)
6833
6834                                         # Maybe the blocking package can be
6835                                         # unmerged to resolve this block.
6836                                         if parent.operation == "merge" and pkg.installed:
6837                                                 depends_on_order.add((pkg, parent))
6838                                                 continue
6839                                         elif parent.operation == "nomerge":
6840                                                 depends_on_order.add((parent, pkg))
6841                                                 continue
6842                                         # None of the above blocker resolution techniques apply,
6843                                         # so apparently this one is unresolvable.
6844                                         unresolved_blocks = True
6845
6846                                 # Make sure we don't unmerge any package that has been pulled
6847                                 # into the graph.
6848                                 if not unresolved_blocks and depends_on_order:
6849                                         for inst_pkg, inst_task in depends_on_order:
6850                                                 if self.digraph.contains(inst_pkg) and \
6851                                                         self.digraph.parent_nodes(inst_pkg):
6852                                                         unresolved_blocks = True
6853                                                         break
6854
6855                                 if not unresolved_blocks and depends_on_order:
6856                                         for inst_pkg, inst_task in depends_on_order:
6857                                                 uninst_task = Package(built=inst_pkg.built,
6858                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6859                                                         metadata=inst_pkg.metadata,
6860                                                         operation="uninstall",
6861                                                         root_config=inst_pkg.root_config,
6862                                                         type_name=inst_pkg.type_name)
6863                                                 self._pkg_cache[uninst_task] = uninst_task
6864                                                 # Enforce correct merge order with a hard dep.
6865                                                 self.digraph.addnode(uninst_task, inst_task,
6866                                                         priority=BlockerDepPriority.instance)
6867                                                 # Count references to this blocker so that it can be
6868                                                 # invalidated after nodes referencing it have been
6869                                                 # merged.
6870                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6871                                 if not unresolved_blocks and not depends_on_order:
6872                                         self._irrelevant_blockers.add(blocker, parent)
6873                                         self._blocker_parents.remove_edge(blocker, parent)
6874                                         if not self._blocker_parents.parent_nodes(blocker):
6875                                                 self._blocker_parents.remove(blocker)
6876                                         if not self._blocker_parents.child_nodes(parent):
6877                                                 self._blocker_parents.remove(parent)
6878                                 if unresolved_blocks:
6879                                         self._unsolvable_blockers.add(blocker, parent)
6880
6881                 return True
6882
6883         def _accept_blocker_conflicts(self):
6884                 acceptable = False
6885                 for x in ("--buildpkgonly", "--fetchonly",
6886                         "--fetch-all-uri", "--nodeps"):
6887                         if x in self.myopts:
6888                                 acceptable = True
6889                                 break
6890                 return acceptable
6891
6892         def _merge_order_bias(self, mygraph):
6893                 """
6894                 For optimal leaf node selection, promote deep system runtime deps and
6895                 order nodes from highest to lowest overall reference count.
6896                 """
6897
6898                 node_info = {}
6899                 for node in mygraph.order:
6900                         node_info[node] = len(mygraph.parent_nodes(node))
6901                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6902
6903                 def cmp_merge_preference(node1, node2):
6904
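                             # Resulting order: uninstall operations sort last, deep system
                             # runtime deps sort first, and remaining nodes are ordered by
                             # descending reference count.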
6905                         if node1.operation == 'uninstall':
6906                                 if node2.operation == 'uninstall':
6907                                         return 0
6908                                 return 1
6909
6910                         if node2.operation == 'uninstall':
6911                                 if node1.operation == 'uninstall':
6912                                         return 0
6913                                 return -1
6914
6915                         node1_sys = node1 in deep_system_deps
6916                         node2_sys = node2 in deep_system_deps
6917                         if node1_sys != node2_sys:
6918                                 if node1_sys:
6919                                         return -1
6920                                 return 1
6921
6922                         return node_info[node2] - node_info[node1]
6923
6924                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6925
6926         def altlist(self, reversed=False):
6927
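                     # Loop until serialization succeeds; _serialize_tasks() signals
                     # that another attempt is needed by raising _serialize_tasks_retry.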
6928                 while self._serialized_tasks_cache is None:
6929                         self._resolve_conflicts()
6930                         try:
6931                                 self._serialized_tasks_cache, self._scheduler_graph = \
6932                                         self._serialize_tasks()
6933                         except self._serialize_tasks_retry:
6934                                 pass
6935
6936                 retlist = self._serialized_tasks_cache[:]
6937                 if reversed:
6938                         retlist.reverse()
6939                 return retlist
6940
6941         def schedulerGraph(self):
6942                 """
6943                 The scheduler graph is identical to the normal one except that
6944                 uninstall edges are reversed in specific cases that require
6945                 conflicting packages to be temporarily installed simultaneously.
6946                 This is intended for use by the Scheduler in its parallelization
6947                 logic. It ensures that temporary simultaneous installation of
6948                 conflicting packages is avoided when appropriate (especially for
6949                 !!atom blockers), but allowed in specific cases that require it.
6950
6951                 Note that this method calls break_refs() which alters the state of
6952                 internal Package instances such that this depgraph instance should
6953                 not be used to perform any more calculations.
6954                 """
6955                 if self._scheduler_graph is None:
6956                         self.altlist()
6957                 self.break_refs(self._scheduler_graph.order)
6958                 return self._scheduler_graph
6959
6960         def break_refs(self, nodes):
6961                 """
6962                 Take a mergelist like that returned from self.altlist() and
6963                 break any references that lead back to the depgraph. This is
6964                 useful if you want to hold references to packages without
6965                 also holding the depgraph on the heap.
6966                 """
6967                 for node in nodes:
6968                         if hasattr(node, "root_config"):
6969                                 # The FakeVartree references the _package_cache which
6970                                 # references the depgraph. So that Package instances don't
6971                                 # hold the depgraph and FakeVartree on the heap, replace
6972                                 # the RootConfig that references the FakeVartree with the
6973                                 # original RootConfig instance which references the actual
6974                                 # vartree.
6975                                 node.root_config = \
6976                                         self._trees_orig[node.root_config.root]["root_config"]
6977
6978         def _resolve_conflicts(self):
6979                 if not self._complete_graph():
6980                         raise self._unknown_internal_error()
6981
6982                 if not self.validate_blockers():
6983                         raise self._unknown_internal_error()
6984
6985                 if self._slot_collision_info:
6986                         self._process_slot_conflicts()
6987
6988         def _serialize_tasks(self):
6989
6990                 if "--debug" in self.myopts:
6991                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6992                         self.digraph.debug_print()
6993                         writemsg("\n", noiselevel=-1)
6994
6995                 scheduler_graph = self.digraph.copy()
6996                 mygraph = self.digraph.copy()
6997                 # Prune "nomerge" root nodes if nothing depends on them, since
6998                 # otherwise they slow down merge order calculation. Don't remove
6999                 # non-root nodes since they help optimize merge order in some cases
7000                 # such as revdep-rebuild.
7001                 removed_nodes = set()
7002                 while True:
7003                         for node in mygraph.root_nodes():
7004                                 if not isinstance(node, Package) or \
7005                                         node.installed or node.onlydeps:
7006                                         removed_nodes.add(node)
7007                         if removed_nodes:
7008                                 self.spinner.update()
7009                                 mygraph.difference_update(removed_nodes)
7010                         if not removed_nodes:
7011                                 break
7012                         removed_nodes.clear()
7013                 self._merge_order_bias(mygraph)
7014                 def cmp_circular_bias(n1, n2):
7015                         """
7016                         RDEPEND is stronger than PDEPEND and this function
7017                         measures such a strength bias within a circular
7018                         dependency relationship.
7019                         """
7020                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7021                                 ignore_priority=priority_range.ignore_medium_soft)
7022                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7023                                 ignore_priority=priority_range.ignore_medium_soft)
7024                         if n1_n2_medium == n2_n1_medium:
7025                                 return 0
7026                         elif n1_n2_medium:
7027                                 return 1
7028                         return -1
7029                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7030                 retlist = []
7031                 # Contains uninstall tasks that have been scheduled to
7032                 # occur after overlapping blockers have been installed.
7033                 scheduled_uninstalls = set()
7034                 # Contains any Uninstall tasks that have been ignored
7035                 # in order to avoid the circular deps code path. These
7036                 # correspond to blocker conflicts that could not be
7037                 # resolved.
7038                 ignored_uninstall_tasks = set()
7039                 have_uninstall_task = False
7040                 complete = "complete" in self.myparams
7041                 asap_nodes = []
7042
7043                 def get_nodes(**kwargs):
7044                         """
7045                         Returns leaf nodes excluding Uninstall instances
7046                         since those should be executed as late as possible.
7047                         """
7048                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7049                                 if isinstance(node, Package) and \
7050                                         (node.operation != "uninstall" or \
7051                                         node in scheduled_uninstalls)]
7052
7053                 # sys-apps/portage needs special treatment if ROOT="/"
7054                 running_root = self._running_root.root
7055                 from portage.const import PORTAGE_PACKAGE_ATOM
7056                 runtime_deps = InternalPackageSet(
7057                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7058                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7059                         PORTAGE_PACKAGE_ATOM)
7060                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7061                         PORTAGE_PACKAGE_ATOM)
7062
7063                 if running_portage:
7064                         running_portage = running_portage[0]
7065                 else:
7066                         running_portage = None
7067
7068                 if replacement_portage:
7069                         replacement_portage = replacement_portage[0]
7070                 else:
7071                         replacement_portage = None
7072
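                     # If the selected portage package is the instance that is already
                     # running, then no replacement is pending and no special ordering
                     # is needed.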
7073                 if replacement_portage == running_portage:
7074                         replacement_portage = None
7075
7076                 if replacement_portage is not None:
7077                         # update from running_portage to replacement_portage asap
7078                         asap_nodes.append(replacement_portage)
7079
7080                 if running_portage is not None:
7081                         try:
7082                                 portage_rdepend = self._select_atoms_highest_available(
7083                                         running_root, running_portage.metadata["RDEPEND"],
7084                                         myuse=running_portage.use.enabled,
7085                                         parent=running_portage, strict=False)
7086                         except portage.exception.InvalidDependString, e:
7087                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7088                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7089                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7090                                 del e
7091                                 portage_rdepend = []
7092                         runtime_deps.update(atom for atom in portage_rdepend \
7093                                 if not atom.startswith("!"))
7094
7095                 def gather_deps(ignore_priority, mergeable_nodes,
7096                         selected_nodes, node):
7097                         """
7098                         Recursively gather a group of nodes that RDEPEND on
7099                         each other. This ensures that they are merged as a group
7100                         and get their RDEPENDs satisfied as soon as possible.
7101                         """
7102                         if node in selected_nodes:
7103                                 return True
7104                         if node not in mergeable_nodes:
7105                                 return False
7106                         if node == replacement_portage and \
7107                                 mygraph.child_nodes(node,
7108                                 ignore_priority=priority_range.ignore_medium_soft):
7109                                 # Make sure that portage always has all of its
7110                                 # RDEPENDs installed first.
7111                                 return False
7112                         selected_nodes.add(node)
7113                         for child in mygraph.child_nodes(node,
7114                                 ignore_priority=ignore_priority):
7115                                 if not gather_deps(ignore_priority,
7116                                         mergeable_nodes, selected_nodes, child):
7117                                         return False
7118                         return True
7119
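                     # In addition to the normal priority-range filters, these helpers
                     # treat the hard deps created for blocker enforcement
                     # (BlockerDepPriority) as ignorable when collecting candidate nodes.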
7120                 def ignore_uninst_or_med(priority):
7121                         if priority is BlockerDepPriority.instance:
7122                                 return True
7123                         return priority_range.ignore_medium(priority)
7124
7125                 def ignore_uninst_or_med_soft(priority):
7126                         if priority is BlockerDepPriority.instance:
7127                                 return True
7128                         return priority_range.ignore_medium_soft(priority)
7129
7130                 tree_mode = "--tree" in self.myopts
7131                 # Tracks whether or not the current iteration should prefer asap_nodes
7132                 # if available.  This is set to False when the previous iteration
7133                 # failed to select any nodes.  It is reset whenever nodes are
7134                 # successfully selected.
7135                 prefer_asap = True
7136
7137                 # Controls whether or not the current iteration should drop edges that
7138                 # are "satisfied" by installed packages, in order to solve circular
7139                 # dependencies. The deep runtime dependencies of installed packages are
7140                 # not checked in this case (bug #199856), so it must be avoided
7141                 # whenever possible.
7142                 drop_satisfied = False
7143
7144                 # State of variables for successive iterations that loosen the
7145                 # criteria for node selection.
7146                 #
7147                 # iteration   prefer_asap   drop_satisfied
7148                 # 1           True          False
7149                 # 2           False         False
7150                 # 3           False         True
7151                 #
7152                 # If no nodes are selected on the last iteration, it is due to
7153                 # unresolved blockers or circular dependencies.
7154
7155                 while not mygraph.empty():
7156                         self.spinner.update()
7157                         selected_nodes = None
7158                         ignore_priority = None
7159                         if drop_satisfied or (prefer_asap and asap_nodes):
7160                                 priority_range = DepPrioritySatisfiedRange
7161                         else:
7162                                 priority_range = DepPriorityNormalRange
7163                         if prefer_asap and asap_nodes:
7164                                 # ASAP nodes are merged before their soft deps. Go ahead and
7165                                 # select root nodes here if necessary, since it's typical for
7166                                 # the parent to have been removed from the graph already.
7167                                 asap_nodes = [node for node in asap_nodes \
7168                                         if mygraph.contains(node)]
7169                                 for node in asap_nodes:
7170                                         if not mygraph.child_nodes(node,
7171                                                 ignore_priority=priority_range.ignore_soft):
7172                                                 selected_nodes = [node]
7173                                                 asap_nodes.remove(node)
7174                                                 break
7175                         if not selected_nodes and \
7176                                 not (prefer_asap and asap_nodes):
7177                                 for i in xrange(priority_range.NONE,
7178                                         priority_range.MEDIUM_SOFT + 1):
7179                                         ignore_priority = priority_range.ignore_priority[i]
7180                                         nodes = get_nodes(ignore_priority=ignore_priority)
7181                                         if nodes:
7182                                                 # If there is a mix of uninstall nodes with other
7183                                                 # types, save the uninstall nodes for later since
7184                                                 # sometimes a merge node will render an uninstall
7185                                                 # node unnecessary (due to occupying the same slot),
7186                                                 # and we want to avoid executing a separate uninstall
7187                                                 # task in that case.
7188                                                 if len(nodes) > 1:
7189                                                         good_uninstalls = []
7190                                                         with_some_uninstalls_excluded = []
7191                                                         for node in nodes:
7192                                                                 if node.operation == "uninstall":
7193                                                                         slot_node = self.mydbapi[node.root
7194                                                                                 ].match_pkgs(node.slot_atom)
7195                                                                         if slot_node and \
7196                                                                                 slot_node[0].operation == "merge":
7197                                                                                 continue
7198                                                                         good_uninstalls.append(node)
7199                                                                 with_some_uninstalls_excluded.append(node)
7200                                                         if good_uninstalls:
7201                                                                 nodes = good_uninstalls
7202                                                         elif with_some_uninstalls_excluded:
7203                                                                 nodes = with_some_uninstalls_excluded
7204                                                         else:
7205                                                                 nodes = nodes
7206
7207                                                 if ignore_priority is None and not tree_mode:
7208                                                         # Greedily pop all of these nodes since no
7209                                                         # relationship has been ignored. This optimization
7210                                                         # destroys --tree output, so it's disabled in tree
7211                                                         # mode.
7212                                                         selected_nodes = nodes
7213                                                 else:
7214                                                         # For optimal merge order:
7215                                                         #  * Only pop one node.
7216                                                         #  * Removing a root node (node without a parent)
7217                                                         #    will not produce a leaf node, so avoid it.
7218                                                         #  * It's normal for a selected uninstall to be a
7219                                                         #    root node, so don't check them for parents.
7220                                                         for node in nodes:
7221                                                                 if node.operation == "uninstall" or \
7222                                                                         mygraph.parent_nodes(node):
7223                                                                         selected_nodes = [node]
7224                                                                         break
7225
7226                                                 if selected_nodes:
7227                                                         break
7228
7229                         if not selected_nodes:
7230                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7231                                 if nodes:
7232                                         mergeable_nodes = set(nodes)
7233                                         if prefer_asap and asap_nodes:
7234                                                 nodes = asap_nodes
7235                                         for i in xrange(priority_range.SOFT,
7236                                                 priority_range.MEDIUM_SOFT + 1):
7237                                                 ignore_priority = priority_range.ignore_priority[i]
7238                                                 for node in nodes:
7239                                                         if not mygraph.parent_nodes(node):
7240                                                                 continue
7241                                                         selected_nodes = set()
7242                                                         if gather_deps(ignore_priority,
7243                                                                 mergeable_nodes, selected_nodes, node):
7244                                                                 break
7245                                                         else:
7246                                                                 selected_nodes = None
7247                                                 if selected_nodes:
7248                                                         break
7249
7250                                         if prefer_asap and asap_nodes and not selected_nodes:
7251                                                 # We failed to find any asap nodes to merge, so ignore
7252                                                 # them for the next iteration.
7253                                                 prefer_asap = False
7254                                                 continue
7255
7256                         if selected_nodes and ignore_priority is not None:
7257                                 # Try to merge ignored medium_soft deps as soon as possible
7258                                 # if they're not satisfied by installed packages.
7259                                 for node in selected_nodes:
7260                                         children = set(mygraph.child_nodes(node))
7261                                         soft = children.difference(
7262                                                 mygraph.child_nodes(node,
7263                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7264                                         medium_soft = children.difference(
7265                                                 mygraph.child_nodes(node,
7266                                                         ignore_priority = \
7267                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7268                                         medium_soft.difference_update(soft)
7269                                         for child in medium_soft:
7270                                                 if child in selected_nodes:
7271                                                         continue
7272                                                 if child in asap_nodes:
7273                                                         continue
7274                                                 asap_nodes.append(child)
7275
7276                         if selected_nodes and len(selected_nodes) > 1:
7277                                 if not isinstance(selected_nodes, list):
7278                                         selected_nodes = list(selected_nodes)
7279                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7280
7281                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7282                                 # An Uninstall task needs to be executed in order to
7283                                 # avoid a conflict if possible.
7284
7285                                 if drop_satisfied:
7286                                         priority_range = DepPrioritySatisfiedRange
7287                                 else:
7288                                         priority_range = DepPriorityNormalRange
7289
7290                                 mergeable_nodes = get_nodes(
7291                                         ignore_priority=ignore_uninst_or_med)
7292
7293                                 min_parent_deps = None
7294                                 uninst_task = None
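                                     # Consider each candidate uninstall task (a leaf node of the
                                     # blocker graph) and choose the one whose parents have the
                                     # fewest remaining dependencies, since removing it is most
                                     # likely to expose a new mergeable leaf node.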
7295                                 for task in myblocker_uninstalls.leaf_nodes():
7296                                         # Do some sanity checks so that system or world packages
7297                                         # don't get uninstalled inappropriately here (only really
7298                                         # necessary when --complete-graph has not been enabled).
7299
7300                                         if task in ignored_uninstall_tasks:
7301                                                 continue
7302
7303                                         if task in scheduled_uninstalls:
7304                                                 # It's been scheduled but it hasn't
7305                                                 # been executed yet due to dependence
7306                                                 # on installation of blocking packages.
7307                                                 continue
7308
7309                                         root_config = self.roots[task.root]
7310                                         inst_pkg = self._pkg_cache[
7311                                                 ("installed", task.root, task.cpv, "nomerge")]
7312
7313                                         if self.digraph.contains(inst_pkg):
7314                                                 continue
7315
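                                             # Blockers from EAPI 0 or 1 ebuilds cannot express whether
                                             # temporary file overlap is forbidden, so fall back to a
                                             # heuristic for them; a blocker that forbids overlap ("!!")
                                             # rules out uninstalling the blocked package from the
                                             # running root.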
7316                                         forbid_overlap = False
7317                                         heuristic_overlap = False
7318                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7319                                                 if blocker.eapi in ("0", "1"):
7320                                                         heuristic_overlap = True
7321                                                 elif blocker.atom.blocker.overlap.forbid:
7322                                                         forbid_overlap = True
7323                                                         break
7324                                         if forbid_overlap and running_root == task.root:
7325                                                 continue
7326
7327                                         if heuristic_overlap and running_root == task.root:
7328                                                 # Never uninstall sys-apps/portage or its essential
7329                                                 # dependencies, except through replacement.
7330                                                 try:
7331                                                         runtime_dep_atoms = \
7332                                                                 list(runtime_deps.iterAtomsForPackage(task))
7333                                                 except portage.exception.InvalidDependString, e:
7334                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7335                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7336                                                                 (task.root, task.cpv, e), noiselevel=-1)
7337                                                         del e
7338                                                         continue
7339
7340                                                 # Don't uninstall a runtime dep if it appears
7341                                                 # to be the only suitable one installed.
7342                                                 skip = False
7343                                                 vardb = root_config.trees["vartree"].dbapi
7344                                                 for atom in runtime_dep_atoms:
7345                                                         other_version = None
7346                                                         for pkg in vardb.match_pkgs(atom):
7347                                                                 if pkg.cpv == task.cpv and \
7348                                                                         pkg.metadata["COUNTER"] == \
7349                                                                         task.metadata["COUNTER"]:
7350                                                                         continue
7351                                                                 other_version = pkg
7352                                                                 break
7353                                                         if other_version is None:
7354                                                                 skip = True
7355                                                                 break
7356                                                 if skip:
7357                                                         continue
7358
7359                                                 # For packages in the system set, don't take
7360                                                 # any chances. If the conflict can't be resolved
7361                                                 # by a normal replacement operation then abort.
7362                                                 skip = False
7363                                                 try:
7364                                                         for atom in root_config.sets[
7365                                                                 "system"].iterAtomsForPackage(task):
7366                                                                 skip = True
7367                                                                 break
7368                                                 except portage.exception.InvalidDependString, e:
7369                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7370                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7371                                                                 (task.root, task.cpv, e), noiselevel=-1)
7372                                                         del e
7373                                                         skip = True
7374                                                 if skip:
7375                                                         continue
7376
7377                                         # Note that the world check isn't always
7378                                         # necessary since self._complete_graph() will
7379                                         # add all packages from the system and world sets to the
7380                                         # graph. This just allows unresolved conflicts to be
7381                                         # detected as early as possible, which makes it possible
7382                                         # to avoid calling self._complete_graph() when it is
7383                                         # unnecessary due to blockers triggering an abort.
7384                                         if not complete:
7385                                                 # For packages in the world set, go ahead and uninstall
7386                                                 # when necessary, as long as the atom will be satisfied
7387                                                 # in the final state.
7388                                                 graph_db = self.mydbapi[task.root]
7389                                                 skip = False
7390                                                 try:
7391                                                         for atom in root_config.sets[
7392                                                                 "world"].iterAtomsForPackage(task):
7393                                                                 satisfied = False
7394                                                                 for pkg in graph_db.match_pkgs(atom):
7395                                                                         if pkg == inst_pkg:
7396                                                                                 continue
7397                                                                         satisfied = True
7398                                                                         break
7399                                                                 if not satisfied:
7400                                                                         skip = True
7401                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7402                                                                         break
7403                                                 except portage.exception.InvalidDependString, e:
7404                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7405                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7406                                                                 (task.root, task.cpv, e), noiselevel=-1)
7407                                                         del e
7408                                                         skip = True
7409                                                 if skip:
7410                                                         continue
7411
7412                                         # Check the deps of parent nodes to ensure that
7413                                         # the chosen task produces a leaf node. Maybe
7414                                         # this can be optimized some more to make the
7415                                         # best possible choice, but the current algorithm
7416                                         # is simple and should be near optimal for most
7417                                         # common cases.
7418                                         mergeable_parent = False
7419                                         parent_deps = set()
7420                                         for parent in mygraph.parent_nodes(task):
7421                                                 parent_deps.update(mygraph.child_nodes(parent,
7422                                                         ignore_priority=priority_range.ignore_medium_soft))
7423                                                 if parent in mergeable_nodes and \
7424                                                         gather_deps(ignore_uninst_or_med_soft,
7425                                                         mergeable_nodes, set(), parent):
7426                                                         mergeable_parent = True
7427
7428                                         if not mergeable_parent:
7429                                                 continue
7430
7431                                         parent_deps.remove(task)
7432                                         if min_parent_deps is None or \
7433                                                 len(parent_deps) < min_parent_deps:
7434                                                 min_parent_deps = len(parent_deps)
7435                                                 uninst_task = task
7436
7437                                 if uninst_task is not None:
7438                                         # The uninstall is performed only after blocking
7439                                         # packages have been merged on top of it. File
7440                                         # collisions between blocking packages are detected
7441                                         # and removed from the list of files to be uninstalled.
7442                                         scheduled_uninstalls.add(uninst_task)
7443                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7444
7445                                         # Reverse the parent -> uninstall edges since we want
7446                                         # to do the uninstall after blocking packages have
7447                                         # been merged on top of it.
7448                                         mygraph.remove(uninst_task)
7449                                         for blocked_pkg in parent_nodes:
7450                                                 mygraph.add(blocked_pkg, uninst_task,
7451                                                         priority=BlockerDepPriority.instance)
7452                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7453                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7454                                                         priority=BlockerDepPriority.instance)
7455
7456                                         # Reset the state variables for leaf node selection and
7457                                         # continue trying to select leaf nodes.
7458                                         prefer_asap = True
7459                                         drop_satisfied = False
7460                                         continue
7461
7462                         if not selected_nodes:
7463                                 # Only select root nodes as a last resort. This case should
7464                                 # only trigger when the graph is nearly empty and the only
7465                                 # remaining nodes are isolated (no parents or children). Since
7466                                 # the nodes must be isolated, ignore_priority is not needed.
7467                                 selected_nodes = get_nodes()
7468
7469                         if not selected_nodes and not drop_satisfied:
7470                                 drop_satisfied = True
7471                                 continue
7472
7473                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7474                                 # If possible, drop an uninstall task here in order to avoid
7475                                 # the circular deps code path. The corresponding blocker will
7476                                 # still be counted as an unresolved conflict.
7477                                 uninst_task = None
7478                                 for node in myblocker_uninstalls.leaf_nodes():
7479                                         try:
7480                                                 mygraph.remove(node)
7481                                         except KeyError:
7482                                                 pass
7483                                         else:
7484                                                 uninst_task = node
7485                                                 ignored_uninstall_tasks.add(node)
7486                                                 break
7487
7488                                 if uninst_task is not None:
7489                                         # Reset the state variables for leaf node selection and
7490                                         # continue trying to select leaf nodes.
7491                                         prefer_asap = True
7492                                         drop_satisfied = False
7493                                         continue
7494
7495                         if not selected_nodes:
7496                                 self._circular_deps_for_display = mygraph
7497                                 raise self._unknown_internal_error()
7498
7499                         # At this point, we've succeeded in selecting one or more nodes, so
7500                         # reset state variables for leaf node selection.
7501                         prefer_asap = True
7502                         drop_satisfied = False
7503
7504                         mygraph.difference_update(selected_nodes)
7505
7506                         for node in selected_nodes:
7507                                 if isinstance(node, Package) and \
7508                                         node.operation == "nomerge":
7509                                         continue
7510
7511                                 # Handle interactions between blockers
7512                                 # and uninstallation tasks.
7513                                 solved_blockers = set()
7514                                 uninst_task = None
7515                                 if isinstance(node, Package) and \
7516                                         "uninstall" == node.operation:
7517                                         have_uninstall_task = True
7518                                         uninst_task = node
7519                                 else:
7520                                         vardb = self.trees[node.root]["vartree"].dbapi
7521                                         previous_cpv = vardb.match(node.slot_atom)
7522                                         if previous_cpv:
7523                                                 # The package will be replaced by this one, so remove
7524                                                 # the corresponding Uninstall task if necessary.
7525                                                 previous_cpv = previous_cpv[0]
7526                                                 uninst_task = \
7527                                                         ("installed", node.root, previous_cpv, "uninstall")
7528                                                 try:
7529                                                         mygraph.remove(uninst_task)
7530                                                 except KeyError:
7531                                                         pass
7532
7533                                 if uninst_task is not None and \
7534                                         uninst_task not in ignored_uninstall_tasks and \
7535                                         myblocker_uninstalls.contains(uninst_task):
7536                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7537                                         myblocker_uninstalls.remove(uninst_task)
7538                                         # Discard any blockers that this Uninstall solves.
7539                                         for blocker in blocker_nodes:
7540                                                 if not myblocker_uninstalls.child_nodes(blocker):
7541                                                         myblocker_uninstalls.remove(blocker)
7542                                                         solved_blockers.add(blocker)
7543
7544                                 retlist.append(node)
7545
7546                                 if (isinstance(node, Package) and \
7547                                         "uninstall" == node.operation) or \
7548                                         (uninst_task is not None and \
7549                                         uninst_task in scheduled_uninstalls):
7550                                         # Include satisfied blockers in the merge list
7551                                         # since the user might be interested, and because
7552                                         # it indicates that blocking packages will be
7553                                         # temporarily installed simultaneously.
7554                                         for blocker in solved_blockers:
7555                                                 retlist.append(Blocker(atom=blocker.atom,
7556                                                         root=blocker.root, eapi=blocker.eapi,
7557                                                         satisfied=True))
7558
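                     # Collect blockers that remain unresolved after task selection and
                     # append them to the merge list so that they appear in the display.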
7559                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7560                 for node in myblocker_uninstalls.root_nodes():
7561                         unsolvable_blockers.add(node)
7562
7563                 for blocker in unsolvable_blockers:
7564                         retlist.append(blocker)
7565
7566                 # If any Uninstall tasks need to be executed in order
7567                 # to avoid a conflict, complete the graph with any
7568                 # dependencies that may have been initially
7569                 # neglected (to ensure that unsafe Uninstall tasks
7570                 # are properly identified and blocked from execution).
7571                 if have_uninstall_task and \
7572                         not complete and \
7573                         not unsolvable_blockers:
7574                         self.myparams.add("complete")
7575                         raise self._serialize_tasks_retry("")
7576
7577                 if unsolvable_blockers and \
7578                         not self._accept_blocker_conflicts():
7579                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7580                         self._serialized_tasks_cache = retlist[:]
7581                         self._scheduler_graph = scheduler_graph
7582                         raise self._unknown_internal_error()
7583
7584                 if self._slot_collision_info and \
7585                         not self._accept_blocker_conflicts():
7586                         self._serialized_tasks_cache = retlist[:]
7587                         self._scheduler_graph = scheduler_graph
7588                         raise self._unknown_internal_error()
7589
7590                 return retlist, scheduler_graph
7591
7592         def _show_circular_deps(self, mygraph):
7593                 # No leaf nodes are available, so we have a circular
7594                 # dependency panic situation.  Reduce the noise level to a
7595                 # minimum via repeated elimination of root nodes since they
7596                 # have no parents and thus cannot be part of a cycle.
7597                 while True:
7598                         root_nodes = mygraph.root_nodes(
7599                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7600                         if not root_nodes:
7601                                 break
7602                         mygraph.difference_update(root_nodes)
7603                 # Display the USE flags that are enabled on nodes that are part
7604                 # of dependency cycles in case that helps the user decide to
7605                 # disable some of them.
7606                 display_order = []
7607                 tempgraph = mygraph.copy()
7608                 while not tempgraph.empty():
7609                         nodes = tempgraph.leaf_nodes()
7610                         if not nodes:
7611                                 node = tempgraph.order[0]
7612                         else:
7613                                 node = nodes[0]
7614                         display_order.append(node)
7615                         tempgraph.remove(node)
7616                 display_order.reverse()
7617                 self.myopts.pop("--quiet", None)
7618                 self.myopts.pop("--verbose", None)
7619                 self.myopts["--tree"] = True
7620                 portage.writemsg("\n\n", noiselevel=-1)
7621                 self.display(display_order)
7622                 prefix = colorize("BAD", " * ")
7623                 portage.writemsg("\n", noiselevel=-1)
7624                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7625                         noiselevel=-1)
7626                 portage.writemsg("\n", noiselevel=-1)
7627                 mygraph.debug_print()
7628                 portage.writemsg("\n", noiselevel=-1)
7629                 portage.writemsg(prefix + "Note that circular dependencies " + \
7630                         "can often be avoided by temporarily\n", noiselevel=-1)
7631                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7632                         "optional dependencies.\n", noiselevel=-1)
7633
7634         def _show_merge_list(self):
7635                 if self._serialized_tasks_cache is not None and \
7636                         not (self._displayed_list and \
7637                         (self._displayed_list == self._serialized_tasks_cache or \
7638                         self._displayed_list == \
7639                                 list(reversed(self._serialized_tasks_cache)))):
7640                         display_list = self._serialized_tasks_cache[:]
7641                         if "--tree" in self.myopts:
7642                                 display_list.reverse()
7643                         self.display(display_list)
7644
7645         def _show_unsatisfied_blockers(self, blockers):
7646                 self._show_merge_list()
7647                 msg = "Error: The above package list contains " + \
7648                         "packages which cannot be installed " + \
7649                         "at the same time on the same system."
7650                 prefix = colorize("BAD", " * ")
7651                 from textwrap import wrap
7652                 portage.writemsg("\n", noiselevel=-1)
7653                 for line in wrap(msg, 70):
7654                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7655
7656                 # Display the conflicting packages along with the packages
7657                 # that pulled them in. This is helpful for troubleshooting
7658                 # cases in which blockers don't solve automatically and
7659                 # the reasons are not apparent from the normal merge list
7660                 # display.
7661
7662                 conflict_pkgs = {}
7663                 for blocker in blockers:
7664                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7665                                 self._blocker_parents.parent_nodes(blocker)):
7666                                 parent_atoms = self._parent_atoms.get(pkg)
7667                                 if not parent_atoms:
7668                                         atom = self._blocked_world_pkgs.get(pkg)
7669                                         if atom is not None:
7670                                                 parent_atoms = set([("@world", atom)])
7671                                 if parent_atoms:
7672                                         conflict_pkgs[pkg] = parent_atoms
7673
7674                 if conflict_pkgs:
7675                         # Reduce noise by pruning packages that are only
7676                         # pulled in by other conflict packages.
7677                         pruned_pkgs = set()
7678                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7679                                 relevant_parent = False
7680                                 for parent, atom in parent_atoms:
7681                                         if parent not in conflict_pkgs:
7682                                                 relevant_parent = True
7683                                                 break
7684                                 if not relevant_parent:
7685                                         pruned_pkgs.add(pkg)
7686                         for pkg in pruned_pkgs:
7687                                 del conflict_pkgs[pkg]
7688
7689                 if conflict_pkgs:
7690                         msg = []
7691                         msg.append("\n")
7692                         indent = "  "
7693                         # Max number of parents shown, to avoid flooding the display.
7694                         max_parents = 3
7695                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7696
7697                                 pruned_list = set()
7698
7699                                 # Prefer packages that are not directly involved in a conflict.
7700                                 for parent_atom in parent_atoms:
7701                                         if len(pruned_list) >= max_parents:
7702                                                 break
7703                                         parent, atom = parent_atom
7704                                         if parent not in conflict_pkgs:
7705                                                 pruned_list.add(parent_atom)
7706
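                                     # Fill any remaining display slots, including parents that
                                     # are themselves part of the conflict.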
7707                                 for parent_atom in parent_atoms:
7708                                         if len(pruned_list) >= max_parents:
7709                                                 break
7710                                         pruned_list.add(parent_atom)
7711
7712                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7713                                 msg.append(indent + "%s pulled in by\n" % pkg)
7714
7715                                 for parent_atom in pruned_list:
7716                                         parent, atom = parent_atom
7717                                         msg.append(2*indent)
7718                                         if isinstance(parent,
7719                                                 (PackageArg, AtomArg)):
7720                                                 # For PackageArg and AtomArg types, it's
7721                                                 # redundant to display the atom attribute.
7722                                                 msg.append(str(parent))
7723                                         else:
7724                                                 # Display the specific atom from SetArg or
7725                                                 # Package types.
7726                                                 msg.append("%s required by %s" % (atom, parent))
7727                                         msg.append("\n")
7728
7729                                 if omitted_parents:
7730                                         msg.append(2*indent)
7731                                         msg.append("(and %d more)\n" % omitted_parents)
7732
7733                                 msg.append("\n")
7734
7735                         sys.stderr.write("".join(msg))
7736                         sys.stderr.flush()
7737
7738                 if "--quiet" not in self.myopts:
7739                         show_blocker_docs_link()
7740
7741         def display(self, mylist, favorites=[], verbosity=None):
7742
7743                 # This is used to prevent display_problems() from
7744                 # redundantly displaying this exact same merge list
7745                 # again via _show_merge_list().
7746                 self._displayed_list = mylist
7747
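                     # Verbosity levels: 1 for --quiet, 3 for --verbose, 2 otherwise.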
7748                 if verbosity is None:
7749                         verbosity = ("--quiet" in self.myopts and 1 or \
7750                                 "--verbose" in self.myopts and 3 or 2)
7751                 favorites_set = InternalPackageSet(favorites)
7752                 oneshot = "--oneshot" in self.myopts or \
7753                         "--onlydeps" in self.myopts
7754                 columns = "--columns" in self.myopts
7755                 changelogs=[]
7756                 p=[]
7757                 blockers = []
7758
7759                 counters = PackageCounters()
7760
7761                 if verbosity == 1 and "--verbose" not in self.myopts:
7762                         def create_use_string(*args):
7763                                 return ""
7764                 else:
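                             # Build the USE="..." string shown in the merge list.  In the
                             # logic below, unchanged enabled flags are red and unchanged
                             # disabled flags are blue, a flag whose state changed since the
                             # installed version is green with a "*" suffix, a flag added to
                             # or removed from IUSE is yellow with a "%" suffix, and forced
                             # or masked flags are wrapped in parentheses.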
7765                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7766                                 old_iuse, old_use,
7767                                 is_new, reinst_flags,
7768                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7769                                 alphabetical=("--alphabetical" in self.myopts)):
7770                                 enabled = []
7771                                 if alphabetical:
7772                                         disabled = enabled
7773                                         removed = enabled
7774                                 else:
7775                                         disabled = []
7776                                         removed = []
7777                                 cur_iuse = set(cur_iuse)
7778                                 enabled_flags = cur_iuse.intersection(cur_use)
7779                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7780                                 any_iuse = cur_iuse.union(old_iuse)
7781                                 any_iuse = list(any_iuse)
7782                                 any_iuse.sort()
7783                                 for flag in any_iuse:
7784                                         flag_str = None
7785                                         isEnabled = False
7786                                         reinst_flag = reinst_flags and flag in reinst_flags
7787                                         if flag in enabled_flags:
7788                                                 isEnabled = True
7789                                                 if is_new or flag in old_use and \
7790                                                         (all_flags or reinst_flag):
7791                                                         flag_str = red(flag)
7792                                                 elif flag not in old_iuse:
7793                                                         flag_str = yellow(flag) + "%*"
7794                                                 elif flag not in old_use:
7795                                                         flag_str = green(flag) + "*"
7796                                         elif flag in removed_iuse:
7797                                                 if all_flags or reinst_flag:
7798                                                         flag_str = yellow("-" + flag) + "%"
7799                                                         if flag in old_use:
7800                                                                 flag_str += "*"
7801                                                         flag_str = "(" + flag_str + ")"
7802                                                         removed.append(flag_str)
7803                                                 continue
7804                                         else:
7805                                                 if is_new or flag in old_iuse and \
7806                                                         flag not in old_use and \
7807                                                         (all_flags or reinst_flag):
7808                                                         flag_str = blue("-" + flag)
7809                                                 elif flag not in old_iuse:
7810                                                         flag_str = yellow("-" + flag)
7811                                                         if flag not in iuse_forced:
7812                                                                 flag_str += "%"
7813                                                 elif flag in old_use:
7814                                                         flag_str = green("-" + flag) + "*"
7815                                         if flag_str:
7816                                                 if flag in iuse_forced:
7817                                                         flag_str = "(" + flag_str + ")"
7818                                                 if isEnabled:
7819                                                         enabled.append(flag_str)
7820                                                 else:
7821                                                         disabled.append(flag_str)
7822
7823                                 if alphabetical:
7824                                         ret = " ".join(enabled)
7825                                 else:
7826                                         ret = " ".join(enabled + disabled + removed)
7827                                 if ret:
7828                                         ret = '%s="%s" ' % (name, ret)
7829                                 return ret
7830
7831                 repo_display = RepoDisplay(self.roots)
7832
7833                 tree_nodes = []
7834                 display_list = []
7835                 mygraph = self.digraph.copy()
7836
7837                 # If there are any Uninstall instances, add the corresponding
7838                 # blockers to the digraph (useful for --tree display).
7839
7840                 executed_uninstalls = set(node for node in mylist \
7841                         if isinstance(node, Package) and node.operation == "unmerge")
7842
7843                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7844                         uninstall_parents = \
7845                                 self._blocker_uninstalls.parent_nodes(uninstall)
7846                         if not uninstall_parents:
7847                                 continue
7848
7849                         # Remove the corresponding "nomerge" node and substitute
7850                         # the Uninstall node.
7851                         inst_pkg = self._pkg_cache[
7852                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7853                         try:
7854                                 mygraph.remove(inst_pkg)
7855                         except KeyError:
7856                                 pass
7857
7858                         try:
7859                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7860                         except KeyError:
7861                                 inst_pkg_blockers = []
7862
7863                         # Break the Package -> Uninstall edges.
7864                         mygraph.remove(uninstall)
7865
7866                         # Resolution of a package's blockers
7867                         # depends on its own uninstallation.
7868                         for blocker in inst_pkg_blockers:
7869                                 mygraph.add(uninstall, blocker)
7870
7871                         # Expand Package -> Uninstall edges into
7872                         # Package -> Blocker -> Uninstall edges.
7873                         for blocker in uninstall_parents:
7874                                 mygraph.add(uninstall, blocker)
7875                                 for parent in self._blocker_parents.parent_nodes(blocker):
7876                                         if parent != inst_pkg:
7877                                                 mygraph.add(blocker, parent)
7878
7879                         # If the uninstall task did not need to be executed because
7880                         # of an upgrade, display Blocker -> Upgrade edges since the
7881                         # corresponding Blocker -> Uninstall edges will not be shown.
7882                         upgrade_node = \
7883                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7884                         if upgrade_node is not None and \
7885                                 uninstall not in executed_uninstalls:
7886                                 for blocker in uninstall_parents:
7887                                         mygraph.add(upgrade_node, blocker)
7888
7889                 unsatisfied_blockers = []
7890                 i = 0
7891                 depth = 0
7892                 shown_edges = set()
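                     # Convert the merge list into (node, depth, ordered) tuples.  With
                     # --tree, each node is nested under the parent that pulled it in;
                     # otherwise everything is displayed at depth 0.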
7893                 for x in mylist:
7894                         if isinstance(x, Blocker) and not x.satisfied:
7895                                 unsatisfied_blockers.append(x)
7896                                 continue
7897                         graph_key = x
7898                         if "--tree" in self.myopts:
7899                                 depth = len(tree_nodes)
7900                                 while depth and graph_key not in \
7901                                         mygraph.child_nodes(tree_nodes[depth-1]):
7902                                                 depth -= 1
7903                                 if depth:
7904                                         tree_nodes = tree_nodes[:depth]
7905                                         tree_nodes.append(graph_key)
7906                                         display_list.append((x, depth, True))
7907                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7908                                 else:
7909                                         traversed_nodes = set() # prevent endless cycles
7910                                         traversed_nodes.add(graph_key)
7911                                         def add_parents(current_node, ordered):
7912                                                 parent_nodes = None
7913                                                 # Do not traverse to parents if this node is an
7914                                                 # argument or a direct member of a set that has
7915                                                 # been specified as an argument (system or world).
7916                                                 if current_node not in self._set_nodes:
7917                                                         parent_nodes = mygraph.parent_nodes(current_node)
7918                                                 if parent_nodes:
7919                                                         child_nodes = set(mygraph.child_nodes(current_node))
7920                                                         selected_parent = None
7921                                                         # First, try to avoid a direct cycle.
7922                                                         for node in parent_nodes:
7923                                                                 if not isinstance(node, (Blocker, Package)):
7924                                                                         continue
7925                                                                 if node not in traversed_nodes and \
7926                                                                         node not in child_nodes:
7927                                                                         edge = (current_node, node)
7928                                                                         if edge in shown_edges:
7929                                                                                 continue
7930                                                                         selected_parent = node
7931                                                                         break
7932                                                         if not selected_parent:
7933                                                                 # A direct cycle is unavoidable.
7934                                                                 for node in parent_nodes:
7935                                                                         if not isinstance(node, (Blocker, Package)):
7936                                                                                 continue
7937                                                                         if node not in traversed_nodes:
7938                                                                                 edge = (current_node, node)
7939                                                                                 if edge in shown_edges:
7940                                                                                         continue
7941                                                                                 selected_parent = node
7942                                                                                 break
7943                                                         if selected_parent:
7944                                                                 shown_edges.add((current_node, selected_parent))
7945                                                                 traversed_nodes.add(selected_parent)
7946                                                                 add_parents(selected_parent, False)
7947                                                 display_list.append((current_node,
7948                                                         len(tree_nodes), ordered))
7949                                                 tree_nodes.append(current_node)
7950                                         tree_nodes = []
7951                                         add_parents(graph_key, True)
7952                         else:
7953                                 display_list.append((x, depth, True))
7954                 mylist = display_list
7955                 for x in unsatisfied_blockers:
7956                         mylist.append((x, 0, True))
7957
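                     # Walk the list backwards and prune redundant entries (consecutive
                     # duplicates and "nomerge" nodes that do not lead to a merge at a
                     # greater depth) left over from filling in the tree.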
7958                 last_merge_depth = 0
7959                 for i in xrange(len(mylist)-1,-1,-1):
7960                         graph_key, depth, ordered = mylist[i]
7961                         if not ordered and depth == 0 and i > 0 \
7962                                 and graph_key == mylist[i-1][0] and \
7963                                 mylist[i-1][1] == 0:
7964                                 # An ordered node got a consecutive duplicate when the tree was
7965                                 # being filled in.
7966                                 del mylist[i]
7967                                 continue
7968                         if ordered and graph_key[-1] != "nomerge":
7969                                 last_merge_depth = depth
7970                                 continue
7971                         if depth >= last_merge_depth or \
7972                                 i < len(mylist) - 1 and \
7973                                 depth >= mylist[i+1][1]:
7974                                         del mylist[i]
7975
7976                 from portage import flatten
7977                 from portage.dep import use_reduce, paren_reduce
7978                 # files to fetch list - avoids counting the same file twice
7979                 # in size display (verbose mode)
7980                 myfetchlist=[]
7981
7982                 # Use this set to detect when all the "repoadd" strings are "[0]"
7983                 # and disable the entire repo display in this case.
7984                 repoadd_set = set()
7985
7986                 for mylist_index in xrange(len(mylist)):
7987                         x, depth, ordered = mylist[mylist_index]
7988                         pkg_type = x[0]
7989                         myroot = x[1]
7990                         pkg_key = x[2]
7991                         portdb = self.trees[myroot]["porttree"].dbapi
7992                         bindb  = self.trees[myroot]["bintree"].dbapi
7993                         vardb = self.trees[myroot]["vartree"].dbapi
7994                         vartree = self.trees[myroot]["vartree"]
7995                         pkgsettings = self.pkgsettings[myroot]
7996
7997                         fetch=" "
7998                         indent = " " * depth
7999
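                             # Blocker entries are shown as "[blocks B]" lines; a lowercase
                             # "b" indicates that the blocker is already satisfied.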
8000                         if isinstance(x, Blocker):
8001                                 if x.satisfied:
8002                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8003                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8004                                 else:
8005                                         blocker_style = "PKG_BLOCKER"
8006                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8007                                 if ordered:
8008                                         counters.blocks += 1
8009                                         if x.satisfied:
8010                                                 counters.blocks_satisfied += 1
8011                                 resolved = portage.key_expand(
8012                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8013                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8014                                         addl += " " + colorize(blocker_style, resolved)
8015                                 else:
8016                                         addl = "[%s %s] %s%s" % \
8017                                                 (colorize(blocker_style, "blocks"),
8018                                                 addl, indent, colorize(blocker_style, resolved))
8019                                 block_parents = self._blocker_parents.parent_nodes(x)
8020                                 block_parents = set([pnode[2] for pnode in block_parents])
8021                                 block_parents = ", ".join(block_parents)
8022                                 if resolved!=x[2]:
8023                                         addl += colorize(blocker_style,
8024                                                 " (\"%s\" is blocking %s)") % \
8025                                                 (str(x.atom).lstrip("!"), block_parents)
8026                                 else:
8027                                         addl += colorize(blocker_style,
8028                                                 " (is blocking %s)") % block_parents
8029                                 if isinstance(x, Blocker) and x.satisfied:
8030                                         if columns:
8031                                                 continue
8032                                         p.append(addl)
8033                                 else:
8034                                         blockers.append(addl)
8035                         else:
8036                                 pkg_status = x[3]
8037                                 pkg_merge = ordered and pkg_status == "merge"
8038                                 if not pkg_merge and pkg_status == "merge":
8039                                         pkg_status = "nomerge"
8040                                 built = pkg_type != "ebuild"
8041                                 installed = pkg_type == "installed"
8042                                 pkg = x
8043                                 metadata = pkg.metadata
8044                                 ebuild_path = None
8045                                 repo_name = metadata["repository"]
8046                                 if pkg_type == "ebuild":
8047                                         ebuild_path = portdb.findname(pkg_key)
8048                                         if not ebuild_path: # shouldn't happen
8049                                                 raise portage.exception.PackageNotFound(pkg_key)
8050                                         repo_path_real = os.path.dirname(os.path.dirname(
8051                                                 os.path.dirname(ebuild_path)))
8052                                 else:
8053                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8054                                 pkg_use = list(pkg.use.enabled)
8055                                 try:
8056                                         restrict = flatten(use_reduce(paren_reduce(
8057                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8058                                 except portage.exception.InvalidDependString, e:
8059                                         if not pkg.installed:
8060                                                 show_invalid_depstring_notice(x,
8061                                                         pkg.metadata["RESTRICT"], str(e))
8062                                                 del e
8063                                                 return 1
8064                                         restrict = []
8065                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8066                                         "fetch" in restrict:
8067                                         fetch = red("F")
8068                                         if ordered:
8069                                                 counters.restrict_fetch += 1
8070                                         if portdb.fetch_check(pkg_key, pkg_use):
8071                                                 fetch = green("f")
8072                                                 if ordered:
8073                                                         counters.restrict_fetch_satisfied += 1
8074
8075                                 # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8076                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
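                                     # Status letters used below: "R" reinstall of the same
                                     # version, "U"/"UD" upgrade or downgrade within an existing
                                     # slot, "NS" new slot, "N" completely new package, and
                                     # "F"/"f" fetch-restricted (red if files still need to be
                                     # fetched manually, green if they are already present).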
8077                                 myoldbest = []
8078                                 myinslotlist = None
8079                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8080                                 if vardb.cpv_exists(pkg_key):
8081                                         addl="  "+yellow("R")+fetch+"  "
8082                                         if ordered:
8083                                                 if pkg_merge:
8084                                                         counters.reinst += 1
8085                                                 elif pkg_status == "uninstall":
8086                                                         counters.uninst += 1
8087                                 # filter out old-style virtual matches
8088                                 elif installed_versions and \
8089                                         portage.cpv_getkey(installed_versions[0]) == \
8090                                         portage.cpv_getkey(pkg_key):
8091                                         myinslotlist = vardb.match(pkg.slot_atom)
8092                                         # If this is the first install of a new-style virtual, we
8093                                         # need to filter out old-style virtual matches.
8094                                         if myinslotlist and \
8095                                                 portage.cpv_getkey(myinslotlist[0]) != \
8096                                                 portage.cpv_getkey(pkg_key):
8097                                                 myinslotlist = None
8098                                         if myinslotlist:
8099                                                 myoldbest = myinslotlist[:]
8100                                                 addl = "   " + fetch
8101                                                 if not portage.dep.cpvequal(pkg_key,
8102                                                         portage.best([pkg_key] + myoldbest)):
8103                                                         # Downgrade in slot
8104                                                         addl += turquoise("U")+blue("D")
8105                                                         if ordered:
8106                                                                 counters.downgrades += 1
8107                                                 else:
8108                                                         # Update in slot
8109                                                         addl += turquoise("U") + " "
8110                                                         if ordered:
8111                                                                 counters.upgrades += 1
8112                                         else:
8113                                                 # New slot, mark it new.
8114                                                 addl = " " + green("NS") + fetch + "  "
8115                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8116                                                 if ordered:
8117                                                         counters.newslot += 1
8118
8119                                         if "--changelog" in self.myopts:
8120                                                 inst_matches = vardb.match(pkg.slot_atom)
8121                                                 if inst_matches:
8122                                                         changelogs.extend(self.calc_changelog(
8123                                                                 portdb.findname(pkg_key),
8124                                                                 inst_matches[0], pkg_key))
8125                                 else:
8126                                         addl = " " + green("N") + " " + fetch + "  "
8127                                         if ordered:
8128                                                 counters.new += 1
8129
8130                                 verboseadd = ""
8131                                 repoadd = None
8132
8133                                 if True:
8134                                         # USE flag display
8135                                         forced_flags = set()
8136                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8137                                         forced_flags.update(pkgsettings.useforce)
8138                                         forced_flags.update(pkgsettings.usemask)
8139
8140                                         cur_use = [flag for flag in pkg.use.enabled \
8141                                                 if flag in pkg.iuse.all]
8142                                         cur_iuse = sorted(pkg.iuse.all)
8143
8144                                         if myoldbest and myinslotlist:
8145                                                 previous_cpv = myoldbest[0]
8146                                         else:
8147                                                 previous_cpv = pkg.cpv
8148                                         if vardb.cpv_exists(previous_cpv):
8149                                                 old_iuse, old_use = vardb.aux_get(
8150                                                                 previous_cpv, ["IUSE", "USE"])
8151                                                 old_iuse = list(set(
8152                                                         filter_iuse_defaults(old_iuse.split())))
8153                                                 old_iuse.sort()
8154                                                 old_use = old_use.split()
8155                                                 is_new = False
8156                                         else:
8157                                                 old_iuse = []
8158                                                 old_use = []
8159                                                 is_new = True
8160
8161                                         old_use = [flag for flag in old_use if flag in old_iuse]
8162
8163                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8164                                         use_expand.sort()
8165                                         use_expand.reverse()
8166                                         use_expand_hidden = \
8167                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8168
8169                                         def map_to_use_expand(myvals, forcedFlags=False,
8170                                                 removeHidden=True):
8171                                                 ret = {}
8172                                                 forced = {}
8173                                                 for exp in use_expand:
8174                                                         ret[exp] = []
8175                                                         forced[exp] = set()
8176                                                         for val in myvals[:]:
8177                                                                 if val.startswith(exp.lower()+"_"):
8178                                                                         if val in forced_flags:
8179                                                                                 forced[exp].add(val[len(exp)+1:])
8180                                                                         ret[exp].append(val[len(exp)+1:])
8181                                                                         myvals.remove(val)
8182                                                 ret["USE"] = myvals
8183                                                 forced["USE"] = [val for val in myvals \
8184                                                         if val in forced_flags]
8185                                                 if removeHidden:
8186                                                         for exp in use_expand_hidden:
8187                                                                 ret.pop(exp, None)
8188                                                 if forcedFlags:
8189                                                         return ret, forced
8190                                                 return ret
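                                              # Illustrative example (hypothetical values): with
                                              # use_expand = ["VIDEO_CARDS"], calling
                                              # map_to_use_expand(["gtk", "video_cards_radeon"]) returns
                                              # {"VIDEO_CARDS": ["radeon"], "USE": ["gtk"]}, with the
                                              # expanded flag removed from the input list.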
8191
8192                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8193                                         # are the only thing that triggered reinstallation.
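                                              # For example (hypothetically), if a flag from a hidden
                                              # USE_EXPAND group is the sole reason for reinstallation,
                                              # that group is dropped from use_expand_hidden so the
                                              # triggering flag still shows up in the USE display below.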
8194                                         reinst_flags_map = {}
8195                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8196                                         reinst_expand_map = None
8197                                         if reinstall_for_flags:
8198                                                 reinst_flags_map = map_to_use_expand(
8199                                                         list(reinstall_for_flags), removeHidden=False)
8200                                                 for k in list(reinst_flags_map):
8201                                                         if not reinst_flags_map[k]:
8202                                                                 del reinst_flags_map[k]
8203                                                 if not reinst_flags_map.get("USE"):
8204                                                         reinst_expand_map = reinst_flags_map.copy()
8205                                                         reinst_expand_map.pop("USE", None)
8206                                         if reinst_expand_map and \
8207                                                 not set(reinst_expand_map).difference(
8208                                                 use_expand_hidden):
8209                                                 use_expand_hidden = \
8210                                                         set(use_expand_hidden).difference(
8211                                                         reinst_expand_map)
8212
8213                                         cur_iuse_map, iuse_forced = \
8214                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8215                                         cur_use_map = map_to_use_expand(cur_use)
8216                                         old_iuse_map = map_to_use_expand(old_iuse)
8217                                         old_use_map = map_to_use_expand(old_use)
8218
8219                                         use_expand.sort()
8220                                         use_expand.insert(0, "USE")
8221                                         
8222                                         for key in use_expand:
8223                                                 if key in use_expand_hidden:
8224                                                         continue
8225                                                 verboseadd += create_use_string(key.upper(),
8226                                                         cur_iuse_map[key], iuse_forced[key],
8227                                                         cur_use_map[key], old_iuse_map[key],
8228                                                         old_use_map[key], is_new,
8229                                                         reinst_flags_map.get(key))
8230
8231                                 if verbosity == 3:
8232                                         # size verbose
8233                                         mysize=0
8234                                         if pkg_type == "ebuild" and pkg_merge:
8235                                                 try:
8236                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8237                                                                 useflags=pkg_use, debug=self.edebug)
8238                                                 except portage.exception.InvalidDependString, e:
8239                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8240                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8241                                                         del e
8242                                                         return 1
8243                                                 if myfilesdict is None:
8244                                                         myfilesdict="[empty/missing/bad digest]"
8245                                                 else:
8246                                                         for myfetchfile in myfilesdict:
8247                                                                 if myfetchfile not in myfetchlist:
8248                                                                         mysize+=myfilesdict[myfetchfile]
8249                                                                         myfetchlist.append(myfetchfile)
8250                                                         if ordered:
8251                                                                 counters.totalsize += mysize
8252                                                 verboseadd += format_size(mysize)
8253
8254                                         # overlay verbose
8255                                         # assign index for a previous version in the same slot
8256                                         has_previous = False
8257                                         repo_name_prev = None
8258                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8259                                                 metadata["SLOT"])
8260                                         slot_matches = vardb.match(slot_atom)
8261                                         if slot_matches:
8262                                                 has_previous = True
8263                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8264                                                         ["repository"])[0]
8265
8266                                         # now use the data to generate output
8267                                         if pkg.installed or not has_previous:
8268                                                 repoadd = repo_display.repoStr(repo_path_real)
8269                                         else:
8270                                                 repo_path_prev = None
8271                                                 if repo_name_prev:
8272                                                         repo_path_prev = portdb.getRepositoryPath(
8273                                                                 repo_name_prev)
8274                                                 if repo_path_prev == repo_path_real:
8275                                                         repoadd = repo_display.repoStr(repo_path_real)
8276                                                 else:
8277                                                         repoadd = "%s=>%s" % (
8278                                                                 repo_display.repoStr(repo_path_prev),
8279                                                                 repo_display.repoStr(repo_path_real))
8280                                         if repoadd:
8281                                                 repoadd_set.add(repoadd)
8282
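                                      # Split the cpv into [category/package, version, revision]; e.g.
                                      # a hypothetical "sys-apps/foo-1.2-r1" yields
                                      # ["sys-apps/foo", "1.2", "-r1"], while an "-r0" revision is
                                      # dropped entirely.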
8283                                 xs = [portage.cpv_getkey(pkg_key)] + \
8284                                         list(portage.catpkgsplit(pkg_key)[2:])
8285                                 if xs[2] == "r0":
8286                                         xs[2] = ""
8287                                 else:
8288                                         xs[2] = "-" + xs[2]
8289
8290                                 mywidth = 130
8291                                 if "COLUMNWIDTH" in self.settings:
8292                                         try:
8293                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8294                                         except ValueError, e:
8295                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8296                                                 portage.writemsg(
8297                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8298                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8299                                                 del e
8300                                 oldlp = mywidth - 30
8301                                 newlp = oldlp - 30
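                                      # newlp is the column at which the new version is printed and
                                      # oldlp the column for previously installed versions in the
                                      # --columns output below.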
8302
8303                                 # Convert myoldbest from a list to a string.
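                                      # (e.g. a hypothetical ["sys-apps/foo-1.2-r0"] is rendered as
                                      # the blue string "[1.2]")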
8304                                 if not myoldbest:
8305                                         myoldbest = ""
8306                                 else:
8307                                         for pos, key in enumerate(myoldbest):
8308                                                 key = portage.catpkgsplit(key)[2] + \
8309                                                         "-" + portage.catpkgsplit(key)[3]
8310                                                 if key[-3:] == "-r0":
8311                                                         key = key[:-3]
8312                                                 myoldbest[pos] = key
8313                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8314
8315                                 pkg_cp = xs[0]
8316                                 root_config = self.roots[myroot]
8317                                 system_set = root_config.sets["system"]
8318                                 world_set  = root_config.sets["world"]
8319
8320                                 pkg_system = False
8321                                 pkg_world = False
8322                                 try:
8323                                         pkg_system = system_set.findAtomForPackage(pkg)
8324                                         pkg_world  = world_set.findAtomForPackage(pkg)
8325                                         if not (oneshot or pkg_world) and \
8326                                                 myroot == self.target_root and \
8327                                                 favorites_set.findAtomForPackage(pkg):
8328                                                 # Maybe it will be added to world now.
8329                                                 if create_world_atom(pkg, favorites_set, root_config):
8330                                                         pkg_world = True
8331                                 except portage.exception.InvalidDependString:
8332                                         # This is reported elsewhere if relevant.
8333                                         pass
8334
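                                      # Colorize the package name according to its merge status and
                                      # system/world set membership (PKG_MERGE*, PKG_UNINSTALL and
                                      # PKG_NOMERGE* color classes).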
8335                                 def pkgprint(pkg_str):
8336                                         if pkg_merge:
8337                                                 if pkg_system:
8338                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8339                                                 elif pkg_world:
8340                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8341                                                 else:
8342                                                         return colorize("PKG_MERGE", pkg_str)
8343                                         elif pkg_status == "uninstall":
8344                                                 return colorize("PKG_UNINSTALL", pkg_str)
8345                                         else:
8346                                                 if pkg_system:
8347                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8348                                                 elif pkg_world:
8349                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8350                                                 else:
8351                                                         return colorize("PKG_NOMERGE", pkg_str)
8352
8353                                 try:
8354                                         properties = flatten(use_reduce(paren_reduce(
8355                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8356                                 except portage.exception.InvalidDependString, e:
8357                                         if not pkg.installed:
8358                                                 show_invalid_depstring_notice(pkg,
8359                                                         pkg.metadata["PROPERTIES"], str(e))
8360                                                 del e
8361                                                 return 1
8362                                         properties = []
8363                                 interactive = "interactive" in properties
8364                                 if interactive and pkg.operation == "merge":
8365                                         addl = colorize("WARN", "I") + addl[1:]
8366                                         if ordered:
8367                                                 counters.interactive += 1
8368
8369                                 if x[1]!="/":
8370                                         if myoldbest:
8371                                                 myoldbest +=" "
8372                                         if "--columns" in self.myopts:
8373                                                 if "--quiet" in self.myopts:
8374                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8375                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8376                                                         myprint=myprint+myoldbest
8377                                                         myprint=myprint+darkgreen("to "+x[1])
8378                                                         verboseadd = None
8379                                                 else:
8380                                                         if not pkg_merge:
8381                                                                 myprint = "[%s] %s%s" % \
8382                                                                         (pkgprint(pkg_status.ljust(13)),
8383                                                                         indent, pkgprint(pkg.cp))
8384                                                         else:
8385                                                                 myprint = "[%s %s] %s%s" % \
8386                                                                         (pkgprint(pkg.type_name), addl,
8387                                                                         indent, pkgprint(pkg.cp))
8388                                                         if (newlp-nc_len(myprint)) > 0:
8389                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8390                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8391                                                         if (oldlp-nc_len(myprint)) > 0:
8392                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8393                                                         myprint=myprint+myoldbest
8394                                                         myprint += darkgreen("to " + pkg.root)
8395                                         else:
8396                                                 if not pkg_merge:
8397                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8398                                                 else:
8399                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8400                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8401                                                         myoldbest + darkgreen("to " + myroot)
8402                                 else:
8403                                         if "--columns" in self.myopts:
8404                                                 if "--quiet" in self.myopts:
8405                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8406                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8407                                                         myprint=myprint+myoldbest
8408                                                         verboseadd = None
8409                                                 else:
8410                                                         if not pkg_merge:
8411                                                                 myprint = "[%s] %s%s" % \
8412                                                                         (pkgprint(pkg_status.ljust(13)),
8413                                                                         indent, pkgprint(pkg.cp))
8414                                                         else:
8415                                                                 myprint = "[%s %s] %s%s" % \
8416                                                                         (pkgprint(pkg.type_name), addl,
8417                                                                         indent, pkgprint(pkg.cp))
8418                                                         if (newlp-nc_len(myprint)) > 0:
8419                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8420                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8421                                                         if (oldlp-nc_len(myprint)) > 0:
8422                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8423                                                         myprint += myoldbest
8424                                         else:
8425                                                 if not pkg_merge:
8426                                                         myprint = "[%s] %s%s %s" % \
8427                                                                 (pkgprint(pkg_status.ljust(13)),
8428                                                                 indent, pkgprint(pkg.cpv),
8429                                                                 myoldbest)
8430                                                 else:
8431                                                         myprint = "[%s %s] %s%s %s" % \
8432                                                                 (pkgprint(pkg_type), addl, indent,
8433                                                                 pkgprint(pkg.cpv), myoldbest)
8434
8435                                 if columns and pkg.operation == "uninstall":
8436                                         continue
8437                                 p.append((myprint, verboseadd, repoadd))
8438
8439                                 if "--tree" not in self.myopts and \
8440                                         "--quiet" not in self.myopts and \
8441                                         not self._opts_no_restart.intersection(self.myopts) and \
8442                                         pkg.root == self._running_root.root and \
8443                                         portage.match_from_list(
8444                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8445                                         not vardb.cpv_exists(pkg.cpv):
8447                                                 if mylist_index < len(mylist) - 1:
8448                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8449                                                         p.append(colorize("WARN", "    then resume the merge."))
8450
8451                 out = sys.stdout
8452                 show_repos = repoadd_set and repoadd_set != set(["0"])
8453
8454                 for x in p:
8455                         if isinstance(x, basestring):
8456                                 out.write("%s\n" % (x,))
8457                                 continue
8458
8459                         myprint, verboseadd, repoadd = x
8460
8461                         if verboseadd:
8462                                 myprint += " " + verboseadd
8463
8464                         if show_repos and repoadd:
8465                                 myprint += " " + teal("[%s]" % repoadd)
8466
8467                         out.write("%s\n" % (myprint,))
8468
8469                 for x in blockers:
8470                         print x
8471
8472                 if verbosity == 3:
8473                         print
8474                         print counters
8475                         if show_repos:
8476                                 sys.stdout.write(str(repo_display))
8477
8478                 if "--changelog" in self.myopts:
8479                         print
8480                         for revision,text in changelogs:
8481                                 print bold('*'+revision)
8482                                 sys.stdout.write(text)
8483
8484                 sys.stdout.flush()
8485                 return os.EX_OK
8486
8487         def display_problems(self):
8488                 """
8489                 Display problems with the dependency graph such as slot collisions.
8490                 This is called internally by display() to show the problems _after_
8491                 the merge list where it is most likely to be seen, but if display()
8492                 is not going to be called then this method should be called explicitly
8493                 to ensure that the user is notified of problems with the graph.
8494
8495                 All output goes to stderr, except for unsatisfied dependencies which
8496                 go to stdout for parsing by programs such as autounmask.
8497                 """
8498
8499                 # Note that show_masked_packages() sends its output to
8500                 # stdout, and some programs such as autounmask parse the
8501                 # output in cases when emerge bails out. However, when
8502                 # show_masked_packages() is called for installed packages
8503                 # here, the message is a warning that is more appropriate
8504                 # to send to stderr, so temporarily redirect stdout to
8505                 # stderr. TODO: Fix output code so there's a cleaner way
8506                 # to redirect everything to stderr.
8507                 sys.stdout.flush()
8508                 sys.stderr.flush()
8509                 stdout = sys.stdout
8510                 try:
8511                         sys.stdout = sys.stderr
8512                         self._display_problems()
8513                 finally:
8514                         sys.stdout = stdout
8515                         sys.stdout.flush()
8516                         sys.stderr.flush()
8517
8518                 # This goes to stdout for parsing by programs like autounmask.
8519                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8520                         self._show_unsatisfied_dep(*pargs, **kwargs)
8521
8522         def _display_problems(self):
8523                 if self._circular_deps_for_display is not None:
8524                         self._show_circular_deps(
8525                                 self._circular_deps_for_display)
8526
8527                 # The user is only notified of a slot conflict if
8528                 # there are no unresolvable blocker conflicts.
8529                 if self._unsatisfied_blockers_for_display is not None:
8530                         self._show_unsatisfied_blockers(
8531                                 self._unsatisfied_blockers_for_display)
8532                 else:
8533                         self._show_slot_collision_notice()
8534
8535                 # TODO: Add generic support for "set problem" handlers so that
8536                 # the below warnings aren't special cases for world only.
8537
8538                 if self._missing_args:
8539                         world_problems = False
8540                         if "world" in self._sets:
8541                                 # Filter out indirect members of world (from nested sets)
8542                                 # since only direct members of world are desired here.
8543                                 world_set = self.roots[self.target_root].sets["world"]
8544                                 for arg, atom in self._missing_args:
8545                                         if arg.name == "world" and atom in world_set:
8546                                                 world_problems = True
8547                                                 break
8548
8549                         if world_problems:
8550                                 sys.stderr.write("\n!!! Problems have been " + \
8551                                         "detected with your world file\n")
8552                                 sys.stderr.write("!!! Please run " + \
8553                                         green("emaint --check world")+"\n\n")
8554
8555                 if self._missing_args:
8556                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8557                                 " Ebuilds for the following packages are either all\n")
8558                         sys.stderr.write(colorize("BAD", "!!!") + \
8559                                 " masked or don't exist:\n")
8560                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8561                                 self._missing_args) + "\n")
8562
8563                 if self._pprovided_args:
8564                         arg_refs = {}
8565                         for arg, atom in self._pprovided_args:
8566                                 if isinstance(arg, SetArg):
8567                                         parent = arg.name
8568                                         arg_atom = (atom, atom)
8569                                 else:
8570                                         parent = "args"
8571                                         arg_atom = (arg.arg, atom)
8572                                 refs = arg_refs.setdefault(arg_atom, [])
8573                                 if parent not in refs:
8574                                         refs.append(parent)
8575                         msg = []
8576                         msg.append(bad("\nWARNING: "))
8577                         if len(self._pprovided_args) > 1:
8578                                 msg.append("Requested packages will not be " + \
8579                                         "merged because they are listed in\n")
8580                         else:
8581                                 msg.append("A requested package will not be " + \
8582                                         "merged because it is listed in\n")
8583                         msg.append("package.provided:\n\n")
8584                         problems_sets = set()
8585                         for (arg, atom), refs in arg_refs.iteritems():
8586                                 ref_string = ""
8587                                 if refs:
8588                                         problems_sets.update(refs)
8589                                         refs.sort()
8590                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8591                                         ref_string = " pulled in by " + ref_string
8592                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8593                         msg.append("\n")
8594                         if "world" in problems_sets:
8595                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8596                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8597                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8598                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8599                                 msg.append("The best course of action depends on the reason that an offending\n")
8600                                 msg.append("package.provided entry exists.\n\n")
8601                         sys.stderr.write("".join(msg))
8602
8603                 masked_packages = []
8604                 for pkg in self._masked_installed:
8605                         root_config = pkg.root_config
8606                         pkgsettings = self.pkgsettings[pkg.root]
8607                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8608                         masked_packages.append((root_config, pkgsettings,
8609                                 pkg.cpv, pkg.metadata, mreasons))
8610                 if masked_packages:
8611                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8612                                 " The following installed packages are masked:\n")
8613                         show_masked_packages(masked_packages)
8614                         show_mask_docs()
8615                         print
8616
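              # Return the ChangeLog entries, as (release, text) tuples, for
              # versions newer than the installed version (current) up to and
              # including the version about to be merged (next); returns [] if
              # the ChangeLog is missing or the installed version cannot be
              # found in it.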
8617         def calc_changelog(self,ebuildpath,current,next):
8618                 if ebuildpath is None or not os.path.exists(ebuildpath):
8619                         return []
8620                 current = '-'.join(portage.catpkgsplit(current)[1:])
8621                 if current.endswith('-r0'):
8622                         current = current[:-3]
8623                 next = '-'.join(portage.catpkgsplit(next)[1:])
8624                 if next.endswith('-r0'):
8625                         next = next[:-3]
8626                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8627                 try:
8628                         changelog = open(changelogpath).read()
8629                 except SystemExit, e:
8630                         raise # Needed else can't exit
8631                 except:
8632                         return []
8633                 divisions = self.find_changelog_tags(changelog)
8634                 #print 'XX from',current,'to',next
8635                 #for div,text in divisions: print 'XX',div
8636                 # skip entries for all revisions above the one we are about to emerge
8637                 for i in range(len(divisions)):
8638                         if divisions[i][0]==next:
8639                                 divisions = divisions[i:]
8640                                 break
8641                 # find out how many entries we are going to display
8642                 for i in range(len(divisions)):
8643                         if divisions[i][0]==current:
8644                                 divisions = divisions[:i]
8645                                 break
8646                 else:
8647                         # couldn't find the current revision in the list; display nothing
8648                         return []
8649                 return divisions
8650
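              # Split a ChangeLog into (release, text) tuples, one per
              # "*<version>" header, in file order; ".ebuild" suffixes and
              # "-r0" revisions are stripped from the release names.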
8651         def find_changelog_tags(self,changelog):
8652                 divs = []
8653                 release = None
8654                 while 1:
8655                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8656                         if match is None:
8657                                 if release is not None:
8658                                         divs.append((release,changelog))
8659                                 return divs
8660                         if release is not None:
8661                                 divs.append((release,changelog[:match.start()]))
8662                         changelog = changelog[match.end():]
8663                         release = match.group(1)
8664                         if release.endswith('.ebuild'):
8665                                 release = release[:-7]
8666                         if release.endswith('-r0'):
8667                                 release = release[:-3]
8668
8669         def saveNomergeFavorites(self):
8670                 """Find atoms in favorites that are not in the mergelist and add them
8671                 to the world file if necessary."""
8672                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8673                         "--oneshot", "--onlydeps", "--pretend"):
8674                         if x in self.myopts:
8675                                 return
8676                 root_config = self.roots[self.target_root]
8677                 world_set = root_config.sets["world"]
8678
8679                 world_locked = False
8680                 if hasattr(world_set, "lock"):
8681                         world_set.lock()
8682                         world_locked = True
8683
8684                 if hasattr(world_set, "load"):
8685                         world_set.load() # maybe it's changed on disk
8686
8687                 args_set = self._sets["args"]
8688                 portdb = self.trees[self.target_root]["porttree"].dbapi
8689                 added_favorites = set()
8690                 for x in self._set_nodes:
8691                         pkg_type, root, pkg_key, pkg_status = x
8692                         if pkg_status != "nomerge":
8693                                 continue
8694
8695                         try:
8696                                 myfavkey = create_world_atom(x, args_set, root_config)
8697                                 if myfavkey:
8698                                         if myfavkey in added_favorites:
8699                                                 continue
8700                                         added_favorites.add(myfavkey)
8701                         except portage.exception.InvalidDependString, e:
8702                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8703                                         (pkg_key, str(e)), noiselevel=-1)
8704                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8705                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8706                                 del e
8707                 all_added = []
8708                 for k in self._sets:
8709                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8710                                 continue
8711                         s = SETPREFIX + k
8712                         if s in world_set:
8713                                 continue
8714                         all_added.append(SETPREFIX + k)
8715                 all_added.extend(added_favorites)
8716                 all_added.sort()
8717                 for a in all_added:
8718                         print ">>> Recording %s in \"world\" favorites file..." % \
8719                                 colorize("INFORM", str(a))
8720                 if all_added:
8721                         world_set.update(all_added)
8722
8723                 if world_locked:
8724                         world_set.unlock()
8725
8726         def loadResumeCommand(self, resume_data, skip_masked=False):
8727                 """
8728                 Add a resume command to the graph and validate it in the process.  This
8729                 will raise a PackageNotFound exception if a package is not available.
8730                 """
8731
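                      # resume_data is the dict saved by a previous emerge
                      # invocation, roughly
                      # {"mergelist": [[pkg_type, root, cpv, action], ...],
                      #  "favorites": [...]}.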
8732                 if not isinstance(resume_data, dict):
8733                         return False
8734
8735                 mergelist = resume_data.get("mergelist")
8736                 if not isinstance(mergelist, list):
8737                         mergelist = []
8738
8739                 fakedb = self.mydbapi
8740                 trees = self.trees
8741                 serialized_tasks = []
8742                 masked_tasks = []
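                      # Only 4-element [pkg_type, root, cpv, "merge"] entries with
                      # a known package type are rebuilt into Package instances
                      # below; anything else is skipped.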
8743                 for x in mergelist:
8744                         if not (isinstance(x, list) and len(x) == 4):
8745                                 continue
8746                         pkg_type, myroot, pkg_key, action = x
8747                         if pkg_type not in self.pkg_tree_map:
8748                                 continue
8749                         if action != "merge":
8750                                 continue
8751                         tree_type = self.pkg_tree_map[pkg_type]
8752                         mydb = trees[myroot][tree_type].dbapi
8753                         db_keys = list(self._trees_orig[myroot][
8754                                 tree_type].dbapi._aux_cache_keys)
8755                         try:
8756                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8757                         except KeyError:
8758                                 # It does not exist or is corrupt.
8759                                 if action == "uninstall":
8760                                         continue
8761                                 raise portage.exception.PackageNotFound(pkg_key)
8762                         installed = action == "uninstall"
8763                         built = pkg_type != "ebuild"
8764                         root_config = self.roots[myroot]
8765                         pkg = Package(built=built, cpv=pkg_key,
8766                                 installed=installed, metadata=metadata,
8767                                 operation=action, root_config=root_config,
8768                                 type_name=pkg_type)
8769                         if pkg_type == "ebuild":
8770                                 pkgsettings = self.pkgsettings[myroot]
8771                                 pkgsettings.setcpv(pkg)
8772                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8773                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8774                         self._pkg_cache[pkg] = pkg
8775
8776                         root_config = self.roots[pkg.root]
8777                         if "merge" == pkg.operation and \
8778                                 not visible(root_config.settings, pkg):
8779                                 if skip_masked:
8780                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8781                                 else:
8782                                         self._unsatisfied_deps_for_display.append(
8783                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8784
8785                         fakedb[myroot].cpv_inject(pkg)
8786                         serialized_tasks.append(pkg)
8787                         self.spinner.update()
8788
8789                 if self._unsatisfied_deps_for_display:
8790                         return False
8791
8792                 if not serialized_tasks or "--nodeps" in self.myopts:
8793                         self._serialized_tasks_cache = serialized_tasks
8794                         self._scheduler_graph = self.digraph
8795                 else:
8796                         self._select_package = self._select_pkg_from_graph
8797                         self.myparams.add("selective")
8798                         # Always traverse deep dependencies in order to account for
8799                         # potentially unsatisfied dependencies of installed packages.
8800                         # This is necessary for correct --keep-going or --resume operation
8801                         # in case a package from a group of circularly dependent packages
8802                         # fails. In this case, a package which has recently been installed
8803                         # may have an unsatisfied circular dependency (pulled in by
8804                         # PDEPEND, for example). So, even though a package is already
8805                         # installed, it may not have all of its dependencies satisfied, so
8806                         # it may not be usable. If such a package is in the subgraph of
8807                         # deep dependencies of a scheduled build, that build needs to
8808                         # be cancelled. In order for this type of situation to be
8809                         # recognized, deep traversal of dependencies is required.
8810                         self.myparams.add("deep")
8811
8812                         favorites = resume_data.get("favorites")
8813                         args_set = self._sets["args"]
8814                         if isinstance(favorites, list):
8815                                 args = self._load_favorites(favorites)
8816                         else:
8817                                 args = []
8818
8819                         for task in serialized_tasks:
8820                                 if isinstance(task, Package) and \
8821                                         task.operation == "merge":
8822                                         if not self._add_pkg(task, None):
8823                                                 return False
8824
8825                         # Packages for argument atoms need to be explicitly
8826                         # added via _add_pkg() so that they are included in the
8827                         # digraph (needed at least for --tree display).
8828                         for arg in args:
8829                                 for atom in arg.set:
8830                                         pkg, existing_node = self._select_package(
8831                                                 arg.root_config.root, atom)
8832                                         if existing_node is None and \
8833                                                 pkg is not None:
8834                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8835                                                         root=pkg.root, parent=arg)):
8836                                                         return False
8837
8838                         # Allow unsatisfied deps here to avoid showing a masking
8839                         # message for an unsatisfied dep that isn't necessarily
8840                         # masked.
8841                         if not self._create_graph(allow_unsatisfied=True):
8842                                 return False
8843
8844                         unsatisfied_deps = []
8845                         for dep in self._unsatisfied_deps:
8846                                 if not isinstance(dep.parent, Package):
8847                                         continue
8848                                 if dep.parent.operation == "merge":
8849                                         unsatisfied_deps.append(dep)
8850                                         continue
8851
8852                                 # For unsatisfied deps of installed packages, only account for
8853                                 # them if they are in the subgraph of dependencies of a package
8854                                 # which is scheduled to be installed.
8855                                 unsatisfied_install = False
8856                                 traversed = set()
8857                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8858                                 while dep_stack:
8859                                         node = dep_stack.pop()
8860                                         if not isinstance(node, Package):
8861                                                 continue
8862                                         if node.operation == "merge":
8863                                                 unsatisfied_install = True
8864                                                 break
8865                                         if node in traversed:
8866                                                 continue
8867                                         traversed.add(node)
8868                                         dep_stack.extend(self.digraph.parent_nodes(node))
8869
8870                                 if unsatisfied_install:
8871                                         unsatisfied_deps.append(dep)
8872
8873                         if masked_tasks or unsatisfied_deps:
8874                                 # This probably means that a required package
8875                                 # was dropped via --skipfirst. It makes the
8876                                 # resume list invalid, so convert it to a
8877                                 # UnsatisfiedResumeDep exception.
8878                                 raise self.UnsatisfiedResumeDep(self,
8879                                         masked_tasks + unsatisfied_deps)
8880                         self._serialized_tasks_cache = None
8881                         try:
8882                                 self.altlist()
8883                         except self._unknown_internal_error:
8884                                 return False
8885
8886                 return True
8887
8888         def _load_favorites(self, favorites):
8889                 """
8890                 Use a list of favorites to resume state from a
8891                 previous select_files() call. This creates similar
8892                 DependencyArg instances to those that would have
8893                 been created by the original select_files() call.
8894                 This allows Package instances to be matched with
8895                 DependencyArg instances during graph creation.
8896                 """
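                      # For example, a favorite of "world" is rewritten as
                      # SETPREFIX + "world" and becomes a SetArg, while a plain
                      # package atom becomes an AtomArg.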
8897                 root_config = self.roots[self.target_root]
8898                 getSetAtoms = root_config.setconfig.getSetAtoms
8899                 sets = root_config.sets
8900                 args = []
8901                 for x in favorites:
8902                         if not isinstance(x, basestring):
8903                                 continue
8904                         if x in ("system", "world"):
8905                                 x = SETPREFIX + x
8906                         if x.startswith(SETPREFIX):
8907                                 s = x[len(SETPREFIX):]
8908                                 if s not in sets:
8909                                         continue
8910                                 if s in self._sets:
8911                                         continue
8912                                 # Recursively expand sets so that containment tests in
8913                                 # self._get_parent_sets() properly match atoms in nested
8914                                 # sets (like if world contains system).
8915                                 expanded_set = InternalPackageSet(
8916                                         initial_atoms=getSetAtoms(s))
8917                                 self._sets[s] = expanded_set
8918                                 args.append(SetArg(arg=x, set=expanded_set,
8919                                         root_config=root_config))
8920                         else:
8921                                 if not portage.isvalidatom(x):
8922                                         continue
8923                                 args.append(AtomArg(arg=x, atom=x,
8924                                         root_config=root_config))
8925
8926                 self._set_args(args)
8927                 return args
8928
8929         class UnsatisfiedResumeDep(portage.exception.PortageException):
8930                 """
8931                 A dependency of a resume list is not installed. This
8932                 can occur when a required package is dropped from the
8933                 merge list via --skipfirst.
8934                 """
8935                 def __init__(self, depgraph, value):
8936                         portage.exception.PortageException.__init__(self, value)
8937                         self.depgraph = depgraph
8938
8939         class _internal_exception(portage.exception.PortageException):
8940                 def __init__(self, value=""):
8941                         portage.exception.PortageException.__init__(self, value)
8942
8943         class _unknown_internal_error(_internal_exception):
8944                 """
8945                 Used by the depgraph internally to terminate graph creation.
8946                 The specific reason for the failure should have been dumped
8947                 to stderr, unfortunately, the exact reason for the failure
8948                 to stderr; unfortunately, the exact reason may not be
8949                 known.
8950
8951         class _serialize_tasks_retry(_internal_exception):
8952                 """
8953                 This is raised by the _serialize_tasks() method when it needs to
8954                 be called again for some reason. The only case that it's currently
8955                 used for is when neglected dependencies need to be added to the
8956                 graph in order to avoid making a potentially unsafe decision.
8957                 """
8958
8959         class _dep_check_composite_db(portage.dbapi):
8960                 """
8961                 A dbapi-like interface that is optimized for use in dep_check() calls.
8962                 This is built on top of the existing depgraph package selection logic.
8963                 Some packages that have been added to the graph may be masked from this
8964                 view in order to influence the atom preference selection that occurs
8965                 via dep_check().
8966                 """
8967                 def __init__(self, depgraph, root):
8968                         portage.dbapi.__init__(self)
8969                         self._depgraph = depgraph
8970                         self._root = root
8971                         self._match_cache = {}
8972                         self._cpv_pkg_map = {}
8973
8974                 def _clear_cache(self):
8975                         self._match_cache.clear()
8976                         self._cpv_pkg_map.clear()
8977
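                      # match() results are memoized per atom in
                      # self._match_cache; the matched Package instances are
                      # stored in self._cpv_pkg_map.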
8978                 def match(self, atom):
8979                         ret = self._match_cache.get(atom)
8980                         if ret is not None:
8981                                 return ret[:]
8982                         orig_atom = atom
8983                         if "/" not in atom:
8984                                 atom = self._dep_expand(atom)
8985                         pkg, existing = self._depgraph._select_package(self._root, atom)
8986                         if not pkg:
8987                                 ret = []
8988                         else:
8989                                 # Return the highest available from select_package() as well as
8990                                 # any matching slots in the graph db.
8991                                 slots = set()
8992                                 slots.add(pkg.metadata["SLOT"])
8993                                 atom_cp = portage.dep_getkey(atom)
8994                                 if pkg.cp.startswith("virtual/"):
8995                                         # For new-style virtual lookahead that occurs inside
8996                                         # dep_check(), examine all slots. This is needed
8997                                         # so that newer slots will not unnecessarily be pulled in
8998                                         # when a satisfying lower slot is already installed. For
8999                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9000                                         # there's no need to pull in a newer slot to satisfy a
9001                                         # virtual/jdk dependency.
9002                                         for db, pkg_type, built, installed, db_keys in \
9003                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9004                                                 for cpv in db.match(atom):
9005                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9006                                                                 continue
9007                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9008                                 ret = []
9009                                 if self._visible(pkg):
9010                                         self._cpv_pkg_map[pkg.cpv] = pkg
9011                                         ret.append(pkg.cpv)
9012                                 slots.remove(pkg.metadata["SLOT"])
9013                                 while slots:
9014                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9015                                         pkg, existing = self._depgraph._select_package(
9016                                                 self._root, slot_atom)
9017                                         if not pkg:
9018                                                 continue
9019                                         if not self._visible(pkg):
9020                                                 continue
9021                                         self._cpv_pkg_map[pkg.cpv] = pkg
9022                                         ret.append(pkg.cpv)
9023                                 if ret:
9024                                         self._cpv_sort_ascending(ret)
9025                         self._match_cache[orig_atom] = ret
9026                         return ret[:]
9027
9028                 def _visible(self, pkg):
9029                         if pkg.installed and "selective" not in self._depgraph.myparams:
9030                                 try:
9031                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9032                                 except (StopIteration, portage.exception.InvalidDependString):
9033                                         arg = None
9034                                 if arg:
9035                                         return False
9036                         if pkg.installed:
9037                                 try:
9038                                         if not visible(
9039                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9040                                                 return False
9041                                 except portage.exception.InvalidDependString:
9042                                         pass
9043                         in_graph = self._depgraph._slot_pkg_map[
9044                                 self._root].get(pkg.slot_atom)
9045                         if in_graph is None:
9046                                 # Mask choices for packages which are not the highest visible
9047                                 # version within their slot (since they usually trigger slot
9048                                 # conflicts).
9049                                 highest_visible, in_graph = self._depgraph._select_package(
9050                                         self._root, pkg.slot_atom)
9051                                 if pkg != highest_visible:
9052                                         return False
9053                         elif in_graph != pkg:
9054                                 # Mask choices for packages that would trigger a slot
9055                                 # conflict with a previously selected package.
9056                                 return False
9057                         return True
9058
9059                 def _dep_expand(self, atom):
9060                         """
9061                         This is only needed for old installed packages that may
9062                         contain atoms that are not fully qualified with a specific
9063                         category. Emulate the cpv_expand() function that's used by
9064                         dbapi.match() in cases like this. If there are multiple
9065                         matches, it's often due to a new-style virtual that has
9066                         been added, so try to filter those out to avoid raising
9067                         a ValueError.
9068                         """
9069                         root_config = self._depgraph.roots[self._root]
9070                         orig_atom = atom
9071                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9072                         if len(expanded_atoms) > 1:
9073                                 non_virtual_atoms = []
9074                                 for x in expanded_atoms:
9075                                         if not portage.dep_getkey(x).startswith("virtual/"):
9076                                                 non_virtual_atoms.append(x)
9077                                 if len(non_virtual_atoms) == 1:
9078                                         expanded_atoms = non_virtual_atoms
9079                         if len(expanded_atoms) > 1:
9080                                 # compatible with portage.cpv_expand()
9081                                 raise portage.exception.AmbiguousPackageName(
9082                                         [portage.dep_getkey(x) for x in expanded_atoms])
9083                         if expanded_atoms:
9084                                 atom = expanded_atoms[0]
9085                         else:
9086                                 null_atom = insert_category_into_atom(atom, "null")
9087                                 null_cp = portage.dep_getkey(null_atom)
9088                                 cat, atom_pn = portage.catsplit(null_cp)
9089                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9090                                 if virts_p:
9091                                         # Allow the resolver to choose which virtual.
9092                                         atom = insert_category_into_atom(atom, "virtual")
9093                                 else:
9094                                         atom = insert_category_into_atom(atom, "null")
9095                         return atom
9096
9097                 def aux_get(self, cpv, wants):
9098                         metadata = self._cpv_pkg_map[cpv].metadata
9099                         return [metadata.get(x, "") for x in wants]
9100
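# Illustrative sketch (not part of the original module): dep_check() only
# relies on a narrow slice of the dbapi interface -- match() returning an
# ascending list of cpv strings and aux_get() returning requested metadata
# values -- which is what _dep_check_composite_db provides on top of the
# depgraph's package selection.  The toy class below mimics that contract
# with a hard-coded mapping; _ToyDepCheckDB and its sample data are
# hypothetical and exist only for illustration.
class _ToyDepCheckDB(object):
        def __init__(self, cpv_metadata):
                # Maps cpv -> metadata dict, e.g.
                # {"virtual/jdk-1.5": {"SLOT": "1.5"}}.
                self._cpv_metadata = cpv_metadata

        def match(self, atom):
                atom_cp = portage.dep_getkey(atom)
                # sorted() is a lexical stand-in for _cpv_sort_ascending().
                return sorted(cpv for cpv in self._cpv_metadata
                        if portage.cpv_getkey(cpv) == atom_cp)

        def aux_get(self, cpv, wants):
                metadata = self._cpv_metadata[cpv]
                return [metadata.get(x, "") for x in wants]
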
9101 class RepoDisplay(object):
9102         def __init__(self, roots):
9103                 self._shown_repos = {}
9104                 self._unknown_repo = False
9105                 repo_paths = set()
9106                 for root_config in roots.itervalues():
9107                         portdir = root_config.settings.get("PORTDIR")
9108                         if portdir:
9109                                 repo_paths.add(portdir)
9110                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9111                         if overlays:
9112                                 repo_paths.update(overlays.split())
9113                 repo_paths = list(repo_paths)
9114                 self._repo_paths = repo_paths
9115                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9116                         for repo_path in repo_paths ]
9117
9118                 # pre-allocate index for PORTDIR so that it always has index 0.
9119                 for root_config in roots.itervalues():
9120                         portdb = root_config.trees["porttree"].dbapi
9121                         portdir = portdb.porttree_root
9122                         if portdir:
9123                                 self.repoStr(portdir)
9124
9125         def repoStr(self, repo_path_real):
9126                 real_index = -1
9127                 if repo_path_real in self._repo_paths_real:
9128                         real_index = self._repo_paths_real.index(repo_path_real)
9129                 if real_index == -1:
9130                         s = "?"
9131                         self._unknown_repo = True
9132                 else:
9133                         shown_repos = self._shown_repos
9134                         repo_paths = self._repo_paths
9135                         repo_path = repo_paths[real_index]
9136                         index = shown_repos.get(repo_path)
9137                         if index is None:
9138                                 index = len(shown_repos)
9139                                 shown_repos[repo_path] = index
9140                         s = str(index)
9141                 return s
9142
9143         def __str__(self):
9144                 output = []
9145                 shown_repos = self._shown_repos
9146                 unknown_repo = self._unknown_repo
9147                 if shown_repos or self._unknown_repo:
9148                         output.append("Portage tree and overlays:\n")
9149                 show_repo_paths = list(shown_repos)
9150                 for repo_path, repo_index in shown_repos.iteritems():
9151                         show_repo_paths[repo_index] = repo_path
9152                 if show_repo_paths:
9153                         for index, repo_path in enumerate(show_repo_paths):
9154                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9155                 if unknown_repo:
9156                         output.append(" "+teal("[?]") + \
9157                                 " indicates that the source repository could not be determined\n")
9158                 return "".join(output)
9159
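# Hedged usage sketch (illustrative only): RepoDisplay hands out one stable
# index per repository path in first-seen order, so the merge list can tag
# each package with a short "[N]" marker and __str__() prints the legend
# afterwards.  Constructing a real RepoDisplay requires root_config objects,
# so this standalone helper only mirrors the indexing idea; the helper name
# and any paths passed to it are hypothetical.
def _example_repo_indexing(repo_paths):
        shown_repos = {}
        lines = []
        for repo_path in repo_paths:
                # First-seen order defines the index, as in repoStr().
                index = shown_repos.setdefault(repo_path, len(shown_repos))
                lines.append("[%d] %s" % (index, repo_path))
        return "\n".join(lines)
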
9160 class PackageCounters(object):
9161
9162         def __init__(self):
9163                 self.upgrades   = 0
9164                 self.downgrades = 0
9165                 self.new        = 0
9166                 self.newslot    = 0
9167                 self.reinst     = 0
9168                 self.uninst     = 0
9169                 self.blocks     = 0
9170                 self.blocks_satisfied         = 0
9171                 self.totalsize  = 0
9172                 self.restrict_fetch           = 0
9173                 self.restrict_fetch_satisfied = 0
9174                 self.interactive              = 0
9175
9176         def __str__(self):
9177                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9178                 myoutput = []
9179                 details = []
9180                 myoutput.append("Total: %s package" % total_installs)
9181                 if total_installs != 1:
9182                         myoutput.append("s")
9183                 if total_installs != 0:
9184                         myoutput.append(" (")
9185                 if self.upgrades > 0:
9186                         details.append("%s upgrade" % self.upgrades)
9187                         if self.upgrades > 1:
9188                                 details[-1] += "s"
9189                 if self.downgrades > 0:
9190                         details.append("%s downgrade" % self.downgrades)
9191                         if self.downgrades > 1:
9192                                 details[-1] += "s"
9193                 if self.new > 0:
9194                         details.append("%s new" % self.new)
9195                 if self.newslot > 0:
9196                         details.append("%s in new slot" % self.newslot)
9197                         if self.newslot > 1:
9198                                 details[-1] += "s"
9199                 if self.reinst > 0:
9200                         details.append("%s reinstall" % self.reinst)
9201                         if self.reinst > 1:
9202                                 details[-1] += "s"
9203                 if self.uninst > 0:
9204                         details.append("%s uninstall" % self.uninst)
9205                         if self.uninst > 1:
9206                                 details[-1] += "s"
9207                 if self.interactive > 0:
9208                         details.append("%s %s" % (self.interactive,
9209                                 colorize("WARN", "interactive")))
9210                 myoutput.append(", ".join(details))
9211                 if total_installs != 0:
9212                         myoutput.append(")")
9213                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9214                 if self.restrict_fetch:
9215                         myoutput.append("\nFetch Restriction: %s package" % \
9216                                 self.restrict_fetch)
9217                         if self.restrict_fetch > 1:
9218                                 myoutput.append("s")
9219                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9220                         myoutput.append(bad(" (%s unsatisfied)") % \
9221                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9222                 if self.blocks > 0:
9223                         myoutput.append("\nConflict: %s block" % \
9224                                 self.blocks)
9225                         if self.blocks > 1:
9226                                 myoutput.append("s")
9227                         if self.blocks_satisfied < self.blocks:
9228                                 myoutput.append(bad(" (%s unsatisfied)") % \
9229                                         (self.blocks - self.blocks_satisfied))
9230                 return "".join(myoutput)
9231
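# Illustrative usage sketch: PackageCounters is a plain tally object.  The
# merge-list display code increments these fields as it walks the list and
# then prints str(counters) to produce the familiar
# "Total: N packages (...)" summary line.  The counts below are invented
# for demonstration and this helper is never called by the module.
def _example_package_counters():
        counters = PackageCounters()
        counters.upgrades = 3
        counters.new = 1
        counters.reinst = 2
        counters.totalsize = 4 * 1024 * 1024
        return str(counters)
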
9232 class PollSelectAdapter(PollConstants):
9233
9234         """
9235         Use select to emulate a poll object, for
9236         systems that don't support poll().
9237         """
9238
9239         def __init__(self):
9240                 self._registered = {}
9241                 self._select_args = [[], [], []]
9242
9243         def register(self, fd, *args):
9244                 """
9245                 Only POLLIN is currently supported!
9246                 """
9247                 if len(args) > 1:
9248                         raise TypeError(
9249                                 "register expected at most 2 arguments, got " + \
9250                                 repr(1 + len(args)))
9251
9252                 eventmask = PollConstants.POLLIN | \
9253                         PollConstants.POLLPRI | PollConstants.POLLOUT
9254                 if args:
9255                         eventmask = args[0]
9256
9257                 self._registered[fd] = eventmask
9258                 self._select_args = None
9259
9260         def unregister(self, fd):
9261                 self._select_args = None
9262                 del self._registered[fd]
9263
9264         def poll(self, *args):
9265                 if len(args) > 1:
9266                         raise TypeError(
9267                                 "poll expected at most 2 arguments, got " + \
9268                                 repr(1 + len(args)))
9269
9270                 timeout = None
9271                 if args:
9272                         timeout = args[0]
9273
9274                 select_args = self._select_args
9275                 if select_args is None:
9276                         select_args = [self._registered.keys(), [], []]
9277
9278                 if timeout is not None:
9279                         select_args = select_args[:]
9280                         # Translate poll() timeout args to select() timeout args:
9281                         #
9282                         #          | units        | value(s) for indefinite block
9283                         # ---------|--------------|------------------------------
9284                         #   poll   | milliseconds | omitted, negative, or None
9285                         # ---------|--------------|------------------------------
9286                         #   select | seconds      | omitted
9287                         # ---------|--------------|------------------------------
9288
9289                         if timeout is not None and timeout < 0:
9290                                 timeout = None
9291                         if timeout is not None:
9292                                 select_args.append(timeout / 1000.0)
9293
9294                 select_events = select.select(*select_args)
9295                 poll_events = []
9296                 for fd in select_events[0]:
9297                         poll_events.append((fd, PollConstants.POLLIN))
9298                 return poll_events
9299
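# Hedged usage sketch: create_poll_instance() (defined below) substitutes
# this adapter on platforms where poll() is missing or known to misbehave.
# The helper registers the read end of a pipe and polls with a timeout in
# milliseconds, just like a real poll object.  It exists purely for
# illustration and is never called by this module.
def _example_poll_select_adapter():
        pr, pw = os.pipe()
        try:
                poll_obj = PollSelectAdapter()
                poll_obj.register(pr, PollConstants.POLLIN)
                os.write(pw, "hello")
                # Expected result: [(pr, PollConstants.POLLIN)]
                return poll_obj.poll(1000)
        finally:
                os.close(pr)
                os.close(pw)
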
9300 class SequentialTaskQueue(SlotObject):
9301
9302         __slots__ = ("max_jobs", "running_tasks") + \
9303                 ("_dirty", "_scheduling", "_task_queue")
9304
9305         def __init__(self, **kwargs):
9306                 SlotObject.__init__(self, **kwargs)
9307                 self._task_queue = deque()
9308                 self.running_tasks = set()
9309                 if self.max_jobs is None:
9310                         self.max_jobs = 1
9311                 self._dirty = True
9312
9313         def add(self, task):
9314                 self._task_queue.append(task)
9315                 self._dirty = True
9316
9317         def addFront(self, task):
9318                 self._task_queue.appendleft(task)
9319                 self._dirty = True
9320
9321         def schedule(self):
9322
9323                 if not self._dirty:
9324                         return False
9325
9326                 if not self:
9327                         return False
9328
9329                 if self._scheduling:
9330                         # Ignore any recursive schedule() calls triggered via
9331                         # self._task_exit().
9332                         return False
9333
9334                 self._scheduling = True
9335
9336                 task_queue = self._task_queue
9337                 running_tasks = self.running_tasks
9338                 max_jobs = self.max_jobs
9339                 state_changed = False
9340
9341                 while task_queue and \
9342                         (max_jobs is True or len(running_tasks) < max_jobs):
9343                         task = task_queue.popleft()
9344                         cancelled = getattr(task, "cancelled", None)
9345                         if not cancelled:
9346                                 running_tasks.add(task)
9347                                 task.addExitListener(self._task_exit)
9348                                 task.start()
9349                         state_changed = True
9350
9351                 self._dirty = False
9352                 self._scheduling = False
9353
9354                 return state_changed
9355
9356         def _task_exit(self, task):
9357                 """
9358                 Since we can always rely on exit listeners being called, the set of
9359                 running tasks is always pruned automatically and there is never any need
9360                 to actively prune it.
9361                 """
9362                 self.running_tasks.remove(task)
9363                 if self._task_queue:
9364                         self._dirty = True
9365
9366         def clear(self):
9367                 self._task_queue.clear()
9368                 running_tasks = self.running_tasks
9369                 while running_tasks:
9370                         task = running_tasks.pop()
9371                         task.removeExitListener(self._task_exit)
9372                         task.cancel()
9373                 self._dirty = False
9374
9375         def __nonzero__(self):
9376                 return bool(self._task_queue or self.running_tasks)
9377
9378         def __len__(self):
9379                 return len(self._task_queue) + len(self.running_tasks)
9380
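# Illustrative sketch: anything exposing start(), addExitListener(),
# removeExitListener(), cancel() and a "cancelled" attribute can be queued.
# The toy task below finishes synchronously inside start() and immediately
# notifies its exit listeners, so a single schedule() call drains the whole
# queue.  _ToyTask and _example_task_queue are hypothetical names that only
# demonstrate the interface SequentialTaskQueue expects.
class _ToyTask(object):
        def __init__(self, name):
                self.name = name
                self.cancelled = False
                self._exit_listeners = []

        def addExitListener(self, listener):
                self._exit_listeners.append(listener)

        def removeExitListener(self, listener):
                self._exit_listeners.remove(listener)

        def start(self):
                # Complete immediately and notify listeners, which prunes
                # this task from the queue's running_tasks set.
                for listener in self._exit_listeners:
                        listener(self)

        def cancel(self):
                self.cancelled = True

def _example_task_queue():
        queue = SequentialTaskQueue(max_jobs=2)
        for name in ("a", "b", "c"):
                queue.add(_ToyTask(name))
        while queue.schedule():
                pass
        # All toy tasks have completed, so the queue reports length 0.
        return len(queue)
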
9381 _can_poll_device = None
9382
9383 def can_poll_device():
9384         """
9385         Test if it's possible to use poll() on a device such as a pty. This
9386         is known to fail on Darwin.
9387         @rtype: bool
9388         @returns: True if poll() on a device succeeds, False otherwise.
9389         """
9390
9391         global _can_poll_device
9392         if _can_poll_device is not None:
9393                 return _can_poll_device
9394
9395         if not hasattr(select, "poll"):
9396                 _can_poll_device = False
9397                 return _can_poll_device
9398
9399         try:
9400                 dev_null = open('/dev/null', 'rb')
9401         except IOError:
9402                 _can_poll_device = False
9403                 return _can_poll_device
9404
9405         p = select.poll()
9406         p.register(dev_null.fileno(), PollConstants.POLLIN)
9407
9408         invalid_request = False
9409         for f, event in p.poll():
9410                 if event & PollConstants.POLLNVAL:
9411                         invalid_request = True
9412                         break
9413         dev_null.close()
9414
9415         _can_poll_device = not invalid_request
9416         return _can_poll_device
9417
9418 def create_poll_instance():
9419         """
9420         Create an instance of select.poll, or an instance of
9421         PollSelectAdapter if there is no poll() implementation or
9422         it is broken somehow.
9423         """
9424         if can_poll_device():
9425                 return select.poll()
9426         return PollSelectAdapter()
9427
9428 getloadavg = getattr(os, "getloadavg", None)
9429 if getloadavg is None:
9430         def getloadavg():
9431                 """
9432                 Uses /proc/loadavg to emulate os.getloadavg().
9433                 Raises OSError if the load average was unobtainable.
9434                 """
9435                 try:
9436                         loadavg_str = open('/proc/loadavg').readline()
9437                 except IOError:
9438                         # getloadavg() is only supposed to raise OSError, so convert
9439                         raise OSError('unknown')
9440                 loadavg_split = loadavg_str.split()
9441                 if len(loadavg_split) < 3:
9442                         raise OSError('unknown')
9443                 loadavg_floats = []
9444                 for i in xrange(3):
9445                         try:
9446                                 loadavg_floats.append(float(loadavg_split[i]))
9447                         except ValueError:
9448                                 raise OSError('unknown')
9449                 return tuple(loadavg_floats)
9450
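# Hedged sketch: the schedulers below consult getloadavg() when deciding
# whether another parallel job may be started (see
# PollScheduler._can_add_job()).  This standalone helper mirrors that
# decision for illustration; its name and arguments are hypothetical and it
# is not used elsewhere in this module.
def _example_load_throttle(max_load, running_job_count):
        # Always allow the first job, since the load average cannot drop
        # while nothing is running.
        if running_job_count < 1:
                return True
        try:
                avg1, avg5, avg15 = getloadavg()
        except OSError:
                # Load average unavailable; err on the side of not spawning
                # more jobs, as _can_add_job() does.
                return False
        return avg1 < max_load
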
9451 class PollScheduler(object):
9452
9453         class _sched_iface_class(SlotObject):
9454                 __slots__ = ("register", "schedule", "unregister")
9455
9456         def __init__(self):
9457                 self._max_jobs = 1
9458                 self._max_load = None
9459                 self._jobs = 0
9460                 self._poll_event_queue = []
9461                 self._poll_event_handlers = {}
9462                 self._poll_event_handler_ids = {}
9463                 # Increment id for each new handler.
9464                 self._event_handler_id = 0
9465                 self._poll_obj = create_poll_instance()
9466                 self._scheduling = False
9467
9468         def _schedule(self):
9469                 """
9470                 Calls _schedule_tasks() and automatically returns early from
9471                 any recursive calls to this method that the _schedule_tasks()
9472                 call might trigger. This makes _schedule() safe to call from
9473                 inside exit listeners.
9474                 """
9475                 if self._scheduling:
9476                         return False
9477                 self._scheduling = True
9478                 try:
9479                         return self._schedule_tasks()
9480                 finally:
9481                         self._scheduling = False
9482
9483         def _running_job_count(self):
9484                 return self._jobs
9485
9486         def _can_add_job(self):
9487                 max_jobs = self._max_jobs
9488                 max_load = self._max_load
9489
9490                 if self._max_jobs is not True and \
9491                         self._running_job_count() >= self._max_jobs:
9492                         return False
9493
9494                 if max_load is not None and \
9495                         (max_jobs is True or max_jobs > 1) and \
9496                         self._running_job_count() >= 1:
9497                         try:
9498                                 avg1, avg5, avg15 = getloadavg()
9499                         except OSError:
9500                                 return False
9501
9502                         if avg1 >= max_load:
9503                                 return False
9504
9505                 return True
9506
9507         def _poll(self, timeout=None):
9508                 """
9509                 All poll() calls pass through here. The poll events
9510                 are added directly to self._poll_event_queue.
9511                 In order to avoid endless blocking, this raises
9512                 StopIteration if timeout is None and there are
9513                 no file descriptors to poll.
9514                 """
9515                 if not self._poll_event_handlers:
9516                         self._schedule()
9517                         if timeout is None and \
9518                                 not self._poll_event_handlers:
9519                                 raise StopIteration(
9520                                         "timeout is None and there are no poll() event handlers")
9521
9522                 # The following error is known to occur with Linux kernel versions
9523                 # less than 2.6.24:
9524                 #
9525                 #   select.error: (4, 'Interrupted system call')
9526                 #
9527                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9528                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9529                 # without any events.
9530                 while True:
9531                         try:
9532                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9533                                 break
9534                         except select.error, e:
9535                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9536                                         level=logging.ERROR, noiselevel=-1)
9537                                 del e
9538                                 if timeout is not None:
9539                                         break
9540
9541         def _next_poll_event(self, timeout=None):
9542                 """
9543                 Since the _schedule_wait() loop is called by event
9544                 handlers from _poll_loop(), maintain a central event
9545                 queue for both of them to share events from a single
9546                 poll() call. In order to avoid endless blocking, this
9547                 raises StopIteration if timeout is None and there are
9548                 no file descriptors to poll.
9549                 """
9550                 if not self._poll_event_queue:
9551                         self._poll(timeout)
9552                 return self._poll_event_queue.pop()
9553
9554         def _poll_loop(self):
9555
9556                 event_handlers = self._poll_event_handlers
9557                 event_handled = False
9558
9559                 try:
9560                         while event_handlers:
9561                                 f, event = self._next_poll_event()
9562                                 handler, reg_id = event_handlers[f]
9563                                 handler(f, event)
9564                                 event_handled = True
9565                 except StopIteration:
9566                         event_handled = True
9567
9568                 if not event_handled:
9569                         raise AssertionError("tight loop")
9570
9571         def _schedule_yield(self):
9572                 """
9573                 Schedule for a short period of time chosen by the scheduler based
9574                 on internal state. Synchronous tasks should call this periodically
9575                 in order to allow the scheduler to service pending poll events. The
9576                 scheduler will call poll() exactly once, without blocking, and any
9577                 resulting poll events will be serviced.
9578                 """
9579                 event_handlers = self._poll_event_handlers
9580                 events_handled = 0
9581
9582                 if not event_handlers:
9583                         return bool(events_handled)
9584
9585                 if not self._poll_event_queue:
9586                         self._poll(0)
9587
9588                 try:
9589                         while event_handlers and self._poll_event_queue:
9590                                 f, event = self._next_poll_event()
9591                                 handler, reg_id = event_handlers[f]
9592                                 handler(f, event)
9593                                 events_handled += 1
9594                 except StopIteration:
9595                         events_handled += 1
9596
9597                 return bool(events_handled)
9598
9599         def _register(self, f, eventmask, handler):
9600                 """
9601                 @rtype: Integer
9602                 @return: A unique registration id, for use in schedule() or
9603                         unregister() calls.
9604                 """
9605                 if f in self._poll_event_handlers:
9606                         raise AssertionError("fd %d is already registered" % f)
9607                 self._event_handler_id += 1
9608                 reg_id = self._event_handler_id
9609                 self._poll_event_handler_ids[reg_id] = f
9610                 self._poll_event_handlers[f] = (handler, reg_id)
9611                 self._poll_obj.register(f, eventmask)
9612                 return reg_id
9613
9614         def _unregister(self, reg_id):
9615                 f = self._poll_event_handler_ids[reg_id]
9616                 self._poll_obj.unregister(f)
9617                 del self._poll_event_handlers[f]
9618                 del self._poll_event_handler_ids[reg_id]
9619
9620         def _schedule_wait(self, wait_ids):
9621                 """
9622                 Schedule until the given wait_ids are no longer registered
9623                 for poll() events.
9624                 @type wait_ids: int or collection of ints
9625                 @param wait_ids: registration id(s) to wait for
9626                 """
9627                 event_handlers = self._poll_event_handlers
9628                 handler_ids = self._poll_event_handler_ids
9629                 event_handled = False
9630
9631                 if isinstance(wait_ids, int):
9632                         wait_ids = frozenset([wait_ids])
9633
9634                 try:
9635                         while wait_ids.intersection(handler_ids):
9636                                 f, event = self._next_poll_event()
9637                                 handler, reg_id = event_handlers[f]
9638                                 handler(f, event)
9639                                 event_handled = True
9640                 except StopIteration:
9641                         event_handled = True
9642
9643                 return event_handled
9644
9645 class QueueScheduler(PollScheduler):
9646
9647         """
9648         Add instances of SequentialTaskQueue and then call run(). The
9649         run() method returns when no tasks remain.
9650         """
9651
9652         def __init__(self, max_jobs=None, max_load=None):
9653                 PollScheduler.__init__(self)
9654
9655                 if max_jobs is None:
9656                         max_jobs = 1
9657
9658                 self._max_jobs = max_jobs
9659                 self._max_load = max_load
9660                 self.sched_iface = self._sched_iface_class(
9661                         register=self._register,
9662                         schedule=self._schedule_wait,
9663                         unregister=self._unregister)
9664
9665                 self._queues = []
9666                 self._schedule_listeners = []
9667
9668         def add(self, q):
9669                 self._queues.append(q)
9670
9671         def remove(self, q):
9672                 self._queues.remove(q)
9673
9674         def run(self):
9675
9676                 while self._schedule():
9677                         self._poll_loop()
9678
9679                 while self._running_job_count():
9680                         self._poll_loop()
9681
9682         def _schedule_tasks(self):
9683                 """
9684                 @rtype: bool
9685                 @returns: True if there may be remaining tasks to schedule,
9686                         False otherwise.
9687                 """
9688                 while self._can_add_job():
9689                         n = self._max_jobs - self._running_job_count()
9690                         if n < 1:
9691                                 break
9692
9693                         if not self._start_next_job(n):
9694                                 return False
9695
9696                 for q in self._queues:
9697                         if q:
9698                                 return True
9699                 return False
9700
9701         def _running_job_count(self):
9702                 job_count = 0
9703                 for q in self._queues:
9704                         job_count += len(q.running_tasks)
9705                 self._jobs = job_count
9706                 return job_count
9707
9708         def _start_next_job(self, n=1):
9709                 started_count = 0
9710                 for q in self._queues:
9711                         initial_job_count = len(q.running_tasks)
9712                         q.schedule()
9713                         final_job_count = len(q.running_tasks)
9714                         if final_job_count > initial_job_count:
9715                                 started_count += (final_job_count - initial_job_count)
9716                         if started_count >= n:
9717                                 break
9718                 return started_count
9719
9720 class TaskScheduler(object):
9721
9722         """
9723         A simple way to handle scheduling of AsynchronousTask instances. Simply
9724         add tasks and call run(). The run() method returns when no tasks remain.
9725         """
9726
9727         def __init__(self, max_jobs=None, max_load=None):
9728                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9729                 self._scheduler = QueueScheduler(
9730                         max_jobs=max_jobs, max_load=max_load)
9731                 self.sched_iface = self._scheduler.sched_iface
9732                 self.run = self._scheduler.run
9733                 self._scheduler.add(self._queue)
9734
9735         def add(self, task):
9736                 self._queue.add(task)
9737
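# Hedged usage sketch: TaskScheduler wires a SequentialTaskQueue into a
# QueueScheduler so callers only need add() and run().  Reusing the
# hypothetical _ToyTask defined above (tasks that complete synchronously in
# start()), run() returns once every queued task has finished.  This helper
# is illustrative and never called by the module.
def _example_task_scheduler():
        task_scheduler = TaskScheduler(max_jobs=2)
        for name in ("x", "y"):
                task_scheduler.add(_ToyTask(name))
        task_scheduler.run()
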
9738 class JobStatusDisplay(object):
9739
9740         _bound_properties = ("curval", "failed", "running")
9741         _jobs_column_width = 48
9742
9743         # Don't update the display unless at least this much
9744         # time has passed, in units of seconds.
9745         _min_display_latency = 2
9746
9747         _default_term_codes = {
9748                 'cr'  : '\r',
9749                 'el'  : '\x1b[K',
9750                 'nel' : '\n',
9751         }
9752
9753         _termcap_name_map = {
9754                 'carriage_return' : 'cr',
9755                 'clr_eol'         : 'el',
9756                 'newline'         : 'nel',
9757         }
9758
9759         def __init__(self, out=sys.stdout, quiet=False):
9760                 object.__setattr__(self, "out", out)
9761                 object.__setattr__(self, "quiet", quiet)
9762                 object.__setattr__(self, "maxval", 0)
9763                 object.__setattr__(self, "merges", 0)
9764                 object.__setattr__(self, "_changed", False)
9765                 object.__setattr__(self, "_displayed", False)
9766                 object.__setattr__(self, "_last_display_time", 0)
9767                 object.__setattr__(self, "width", 80)
9768                 self.reset()
9769
9770                 isatty = hasattr(out, "isatty") and out.isatty()
9771                 object.__setattr__(self, "_isatty", isatty)
9772                 if not isatty or not self._init_term():
9773                         term_codes = {}
9774                         for k, capname in self._termcap_name_map.iteritems():
9775                                 term_codes[k] = self._default_term_codes[capname]
9776                         object.__setattr__(self, "_term_codes", term_codes)
9777                 encoding = sys.getdefaultencoding()
9778                 for k, v in self._term_codes.items():
9779                         if not isinstance(v, basestring):
9780                                 self._term_codes[k] = v.decode(encoding, 'replace')
9781
9782         def _init_term(self):
9783                 """
9784                 Initialize term control codes.
9785                 @rtype: bool
9786                 @returns: True if term codes were successfully initialized,
9787                         False otherwise.
9788                 """
9789
9790                 term_type = os.environ.get("TERM", "vt100")
9791                 tigetstr = None
9792
9793                 try:
9794                         import curses
9795                         try:
9796                                 curses.setupterm(term_type, self.out.fileno())
9797                                 tigetstr = curses.tigetstr
9798                         except curses.error:
9799                                 pass
9800                 except ImportError:
9801                         pass
9802
9803                 if tigetstr is None:
9804                         return False
9805
9806                 term_codes = {}
9807                 for k, capname in self._termcap_name_map.iteritems():
9808                         code = tigetstr(capname)
9809                         if code is None:
9810                                 code = self._default_term_codes[capname]
9811                         term_codes[k] = code
9812                 object.__setattr__(self, "_term_codes", term_codes)
9813                 return True
9814
9815         def _format_msg(self, msg):
9816                 return ">>> %s" % msg
9817
9818         def _erase(self):
9819                 self.out.write(
9820                         self._term_codes['carriage_return'] + \
9821                         self._term_codes['clr_eol'])
9822                 self.out.flush()
9823                 self._displayed = False
9824
9825         def _display(self, line):
9826                 self.out.write(line)
9827                 self.out.flush()
9828                 self._displayed = True
9829
9830         def _update(self, msg):
9831
9832                 out = self.out
9833                 if not self._isatty:
9834                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9835                         self.out.flush()
9836                         self._displayed = True
9837                         return
9838
9839                 if self._displayed:
9840                         self._erase()
9841
9842                 self._display(self._format_msg(msg))
9843
9844         def displayMessage(self, msg):
9845
9846                 was_displayed = self._displayed
9847
9848                 if self._isatty and self._displayed:
9849                         self._erase()
9850
9851                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9852                 self.out.flush()
9853                 self._displayed = False
9854
9855                 if was_displayed:
9856                         self._changed = True
9857                         self.display()
9858
9859         def reset(self):
9860                 self.maxval = 0
9861                 self.merges = 0
9862                 for name in self._bound_properties:
9863                         object.__setattr__(self, name, 0)
9864
9865                 if self._displayed:
9866                         self.out.write(self._term_codes['newline'])
9867                         self.out.flush()
9868                         self._displayed = False
9869
9870         def __setattr__(self, name, value):
9871                 old_value = getattr(self, name)
9872                 if value == old_value:
9873                         return
9874                 object.__setattr__(self, name, value)
9875                 if name in self._bound_properties:
9876                         self._property_change(name, old_value, value)
9877
9878         def _property_change(self, name, old_value, new_value):
9879                 self._changed = True
9880                 self.display()
9881
9882         def _load_avg_str(self):
9883                 try:
9884                         avg = getloadavg()
9885                 except OSError:
9886                         return 'unknown'
9887
9888                 max_avg = max(avg)
9889
9890                 if max_avg < 10:
9891                         digits = 2
9892                 elif max_avg < 100:
9893                         digits = 1
9894                 else:
9895                         digits = 0
9896
9897                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9898
9899         def display(self):
9900                 """
9901                 Display status on stdout, but only if something has
9902                 changed since the last call.
9903                 """
9904
9905                 if self.quiet:
9906                         return
9907
9908                 current_time = time.time()
9909                 time_delta = current_time - self._last_display_time
9910                 if self._displayed and \
9911                         not self._changed:
9912                         if not self._isatty:
9913                                 return
9914                         if time_delta < self._min_display_latency:
9915                                 return
9916
9917                 self._last_display_time = current_time
9918                 self._changed = False
9919                 self._display_status()
9920
9921         def _display_status(self):
9922                 # Don't use len(self._completed_tasks) here since that also
9923                 # can include uninstall tasks.
9924                 curval_str = str(self.curval)
9925                 maxval_str = str(self.maxval)
9926                 running_str = str(self.running)
9927                 failed_str = str(self.failed)
9928                 load_avg_str = self._load_avg_str()
9929
9930                 color_output = StringIO()
9931                 plain_output = StringIO()
9932                 style_file = portage.output.ConsoleStyleFile(color_output)
9933                 style_file.write_listener = plain_output
9934                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9935                 style_writer.style_listener = style_file.new_styles
9936                 f = formatter.AbstractFormatter(style_writer)
9937
9938                 number_style = "INFORM"
9939                 f.add_literal_data("Jobs: ")
9940                 f.push_style(number_style)
9941                 f.add_literal_data(curval_str)
9942                 f.pop_style()
9943                 f.add_literal_data(" of ")
9944                 f.push_style(number_style)
9945                 f.add_literal_data(maxval_str)
9946                 f.pop_style()
9947                 f.add_literal_data(" complete")
9948
9949                 if self.running:
9950                         f.add_literal_data(", ")
9951                         f.push_style(number_style)
9952                         f.add_literal_data(running_str)
9953                         f.pop_style()
9954                         f.add_literal_data(" running")
9955
9956                 if self.failed:
9957                         f.add_literal_data(", ")
9958                         f.push_style(number_style)
9959                         f.add_literal_data(failed_str)
9960                         f.pop_style()
9961                         f.add_literal_data(" failed")
9962
9963                 padding = self._jobs_column_width - len(plain_output.getvalue())
9964                 if padding > 0:
9965                         f.add_literal_data(padding * " ")
9966
9967                 f.add_literal_data("Load avg: ")
9968                 f.add_literal_data(load_avg_str)
9969
9970                 # Truncate to fit width, to avoid making the terminal scroll if the
9971                 # line overflows (happens when the load average is large).
9972                 plain_output = plain_output.getvalue()
9973                 if self._isatty and len(plain_output) > self.width:
9974                         # Use plain_output here since it's easier to truncate
9975                         # properly than the color output which contains console
9976                         # color codes.
9977                         self._update(plain_output[:self.width])
9978                 else:
9979                         self._update(color_output.getvalue())
9980
9981                 xtermTitle(" ".join(plain_output.split()))
9982
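# Hedged usage sketch: JobStatusDisplay keeps a single status line on the
# terminal and redraws it whenever one of the bound properties (curval,
# failed, running) changes, while displayMessage() prints a normal line
# above it.  The values assigned below are invented for illustration and
# this helper is not called anywhere in the module.
def _example_job_status_display():
        display = JobStatusDisplay(out=sys.stdout, quiet=True)
        display.maxval = 5
        display.curval = 2
        display.running = 1
        display.displayMessage("illustrative status message")
        display.reset()
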
9983 class Scheduler(PollScheduler):
9984
9985         _opts_ignore_blockers = \
9986                 frozenset(["--buildpkgonly",
9987                 "--fetchonly", "--fetch-all-uri",
9988                 "--nodeps", "--pretend"])
9989
9990         _opts_no_background = \
9991                 frozenset(["--pretend",
9992                 "--fetchonly", "--fetch-all-uri"])
9993
9994         _opts_no_restart = frozenset(["--buildpkgonly",
9995                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9996
9997         _bad_resume_opts = set(["--ask", "--changelog",
9998                 "--resume", "--skipfirst"])
9999
10000         _fetch_log = "/var/log/emerge-fetch.log"
10001
10002         class _iface_class(SlotObject):
10003                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10004                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10005                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10006                         "unregister")
10007
10008         class _fetch_iface_class(SlotObject):
10009                 __slots__ = ("log_file", "schedule")
10010
10011         _task_queues_class = slot_dict_class(
10012                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10013
10014         class _build_opts_class(SlotObject):
10015                 __slots__ = ("buildpkg", "buildpkgonly",
10016                         "fetch_all_uri", "fetchonly", "pretend")
10017
10018         class _binpkg_opts_class(SlotObject):
10019                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10020
10021         class _pkg_count_class(SlotObject):
10022                 __slots__ = ("curval", "maxval")
10023
10024         class _emerge_log_class(SlotObject):
10025                 __slots__ = ("xterm_titles",)
10026
10027                 def log(self, *pargs, **kwargs):
10028                         if not self.xterm_titles:
10029                                 # Avoid interference with the scheduler's status display.
10030                                 kwargs.pop("short_msg", None)
10031                         emergelog(self.xterm_titles, *pargs, **kwargs)
10032
10033         class _failed_pkg(SlotObject):
10034                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10035
10036         class _ConfigPool(object):
10037                 """Interface for a task to temporarily allocate a config
10038                 instance from a pool. This allows a task to be constructed
10039                 long before the config instance actually becomes needed, like
10040                 when prefetchers are constructed for the whole merge list."""
10041                 __slots__ = ("_root", "_allocate", "_deallocate")
10042                 def __init__(self, root, allocate, deallocate):
10043                         self._root = root
10044                         self._allocate = allocate
10045                         self._deallocate = deallocate
10046                 def allocate(self):
10047                         return self._allocate(self._root)
10048                 def deallocate(self, settings):
10049                         self._deallocate(settings)
10050
10051         class _unknown_internal_error(portage.exception.PortageException):
10052                 """
10053                 Used internally to terminate scheduling. The specific reason for
10054                 the failure should have been dumped to stderr.
10055                 """
10056                 def __init__(self, value=""):
10057                         portage.exception.PortageException.__init__(self, value)
10058
10059         def __init__(self, settings, trees, mtimedb, myopts,
10060                 spinner, mergelist, favorites, digraph):
10061                 PollScheduler.__init__(self)
10062                 self.settings = settings
10063                 self.target_root = settings["ROOT"]
10064                 self.trees = trees
10065                 self.myopts = myopts
10066                 self._spinner = spinner
10067                 self._mtimedb = mtimedb
10068                 self._mergelist = mergelist
10069                 self._favorites = favorites
10070                 self._args_set = InternalPackageSet(favorites)
10071                 self._build_opts = self._build_opts_class()
10072                 for k in self._build_opts.__slots__:
10073                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10074                 self._binpkg_opts = self._binpkg_opts_class()
10075                 for k in self._binpkg_opts.__slots__:
10076                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10077
10078                 self.curval = 0
10079                 self._logger = self._emerge_log_class()
10080                 self._task_queues = self._task_queues_class()
10081                 for k in self._task_queues.allowed_keys:
10082                         setattr(self._task_queues, k,
10083                                 SequentialTaskQueue())
10084
10085                 # Holds merges that will wait to be executed when no builds are
10086                 # executing. This is useful for system packages since dependencies
10087                 # on system packages are frequently unspecified.
10088                 self._merge_wait_queue = []
10089                 # Holds merges that have been transferred from the merge_wait_queue to
10090                 # the actual merge queue. They are removed from this list upon
10091                 # completion. Other packages can start building only when this list is
10092                 # empty.
10093                 self._merge_wait_scheduled = []
10094
10095                 # Holds system packages and their deep runtime dependencies. Before
10096                 # being merged, these packages go to merge_wait_queue, to be merged
10097                 # when no other packages are building.
10098                 self._deep_system_deps = set()
10099
10100                 # Holds packages to merge which will satisfy currently unsatisfied
10101                 # deep runtime dependencies of system packages. If this is not empty
10102                 # then no parallel builds will be spawned until it is empty. This
10103                 # minimizes the possibility that a build will fail due to the system
10104                 # being in a fragile state. For example, see bug #259954.
10105                 self._unsatisfied_system_deps = set()
10106
10107                 self._status_display = JobStatusDisplay()
10108                 self._max_load = myopts.get("--load-average")
10109                 max_jobs = myopts.get("--jobs")
10110                 if max_jobs is None:
10111                         max_jobs = 1
10112                 self._set_max_jobs(max_jobs)
10113
10114                 # The root where the currently running
10115                 # portage instance is installed.
10116                 self._running_root = trees["/"]["root_config"]
10117                 self.edebug = 0
10118                 if settings.get("PORTAGE_DEBUG", "") == "1":
10119                         self.edebug = 1
10120                 self.pkgsettings = {}
10121                 self._config_pool = {}
10122                 self._blocker_db = {}
10123                 for root in trees:
10124                         self._config_pool[root] = []
10125                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10126
10127                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10128                         schedule=self._schedule_fetch)
10129                 self._sched_iface = self._iface_class(
10130                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10131                         dblinkDisplayMerge=self._dblink_display_merge,
10132                         dblinkElog=self._dblink_elog,
10133                         dblinkEmergeLog=self._dblink_emerge_log,
10134                         fetch=fetch_iface, register=self._register,
10135                         schedule=self._schedule_wait,
10136                         scheduleSetup=self._schedule_setup,
10137                         scheduleUnpack=self._schedule_unpack,
10138                         scheduleYield=self._schedule_yield,
10139                         unregister=self._unregister)
10140
10141                 self._prefetchers = weakref.WeakValueDictionary()
10142                 self._pkg_queue = []
10143                 self._completed_tasks = set()
10144
10145                 self._failed_pkgs = []
10146                 self._failed_pkgs_all = []
10147                 self._failed_pkgs_die_msgs = []
10148                 self._post_mod_echo_msgs = []
10149                 self._parallel_fetch = False
10150                 merge_count = len([x for x in mergelist \
10151                         if isinstance(x, Package) and x.operation == "merge"])
10152                 self._pkg_count = self._pkg_count_class(
10153                         curval=0, maxval=merge_count)
10154                 self._status_display.maxval = self._pkg_count.maxval
10155
10156                 # The load average takes some time to respond when new
10157                 # jobs are added, so we need to limit the rate of adding
10158                 # new jobs.
10159                 self._job_delay_max = 10
10160                 self._job_delay_factor = 1.0
10161                 self._job_delay_exp = 1.5
10162                 self._previous_job_start_time = None
10163
10164                 self._set_digraph(digraph)
10165
10166                 # This is used to memoize the _choose_pkg() result when
10167                 # no packages can be chosen until one of the existing
10168                 # jobs completes.
10169                 self._choose_pkg_return_early = False
10170
10171                 features = self.settings.features
10172                 if "parallel-fetch" in features and \
10173                         not ("--pretend" in self.myopts or \
10174                         "--fetch-all-uri" in self.myopts or \
10175                         "--fetchonly" in self.myopts):
10176                         if "distlocks" not in features:
10177                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10178                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10179                                         "requires the distlocks feature enabled"+"\n",
10180                                         noiselevel=-1)
10181                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10182                                         "thus parallel-fetching is being disabled"+"\n",
10183                                         noiselevel=-1)
10184                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10185                         elif len(mergelist) > 1:
10186                                 self._parallel_fetch = True
10187
10188                 if self._parallel_fetch:
10189                         # clear out the existing fetch log if it exists
10190                         try:
10191                                 open(self._fetch_log, 'w').close()
10192                         except EnvironmentError:
10193                                 pass
10194
10195                 self._running_portage = None
10196                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10197                         portage.const.PORTAGE_PACKAGE_ATOM)
10198                 if portage_match:
10199                         cpv = portage_match.pop()
10200                         self._running_portage = self._pkg(cpv, "installed",
10201                                 self._running_root, installed=True)
10202
10203         def _poll(self, timeout=None):
10204                 self._schedule()
10205                 PollScheduler._poll(self, timeout=timeout)
10206
10207         def _set_max_jobs(self, max_jobs):
10208                 self._max_jobs = max_jobs
10209                 self._task_queues.jobs.max_jobs = max_jobs
10210
10211         def _background_mode(self):
10212                 """
10213                 Check if background mode is enabled and adjust states as necessary.
10214
10215                 @rtype: bool
10216                 @returns: True if background mode is enabled, False otherwise.
10217                 """
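                      # Background mode requires that more than one job may run at once
                      # (or --quiet), and that none of the options in self._opts_no_background
                      # were given. Interactive packages, handled below, can still switch it
                      # back off.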
10218                 background = (self._max_jobs is True or \
10219                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10220                         not bool(self._opts_no_background.intersection(self.myopts))
10221
10222                 if background:
10223                         interactive_tasks = self._get_interactive_tasks()
10224                         if interactive_tasks:
10225                                 background = False
10226                                 writemsg_level(">>> Sending package output to stdio due " + \
10227                                         "to interactive package(s):\n",
10228                                         level=logging.INFO, noiselevel=-1)
10229                                 msg = [""]
10230                                 for pkg in interactive_tasks:
10231                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10232                                         if pkg.root != "/":
10233                                                 pkg_str += " for " + pkg.root
10234                                         msg.append(pkg_str)
10235                                 msg.append("")
10236                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10237                                         level=logging.INFO, noiselevel=-1)
10238                                 if self._max_jobs is True or self._max_jobs > 1:
10239                                         self._set_max_jobs(1)
10240                                         writemsg_level(">>> Setting --jobs=1 due " + \
10241                                                 "to the above interactive package(s)\n",
10242                                                 level=logging.INFO, noiselevel=-1)
10243
10244                 self._status_display.quiet = \
10245                         not background or \
10246                         ("--quiet" in self.myopts and \
10247                         "--verbose" not in self.myopts)
10248
10249                 self._logger.xterm_titles = \
10250                         "notitles" not in self.settings.features and \
10251                         self._status_display.quiet
10252
10253                 return background
10254
10255         def _get_interactive_tasks(self):
10256                 from portage import flatten
10257                 from portage.dep import use_reduce, paren_reduce
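                      # A merge task counts as interactive when "interactive" appears in its
                      # PROPERTIES metadata after reducing USE-conditional groups against the
                      # package's enabled USE flags.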
10258                 interactive_tasks = []
10259                 for task in self._mergelist:
10260                         if not (isinstance(task, Package) and \
10261                                 task.operation == "merge"):
10262                                 continue
10263                         try:
10264                                 properties = flatten(use_reduce(paren_reduce(
10265                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10266                         except portage.exception.InvalidDependString, e:
10267                                 show_invalid_depstring_notice(task,
10268                                         task.metadata["PROPERTIES"], str(e))
10269                                 raise self._unknown_internal_error()
10270                         if "interactive" in properties:
10271                                 interactive_tasks.append(task)
10272                 return interactive_tasks
10273
10274         def _set_digraph(self, digraph):
10275                 if "--nodeps" in self.myopts or \
10276                         (self._max_jobs is not True and self._max_jobs < 2):
10277                         # save some memory
10278                         self._digraph = None
10279                         return
10280
10281                 self._digraph = digraph
10282                 self._find_system_deps()
10283                 self._prune_digraph()
10284                 self._prevent_builddir_collisions()
10285
10286         def _find_system_deps(self):
10287                 """
10288                 Find system packages and their deep runtime dependencies. Before being
10289                 merged, these packages go to merge_wait_queue, to be merged when no
10290                 other packages are building.
10291                 """
10292                 deep_system_deps = self._deep_system_deps
10293                 deep_system_deps.clear()
10294                 deep_system_deps.update(
10295                         _find_deep_system_runtime_deps(self._digraph))
10296                 deep_system_deps.difference_update([pkg for pkg in \
10297                         deep_system_deps if pkg.operation != "merge"])
10298
10299         def _prune_digraph(self):
10300                 """
10301                 Prune any root nodes that are irrelevant.
10302                 """
10303
10304                 graph = self._digraph
10305                 completed_tasks = self._completed_tasks
10306                 removed_nodes = set()
10307                 while True:
10308                         for node in graph.root_nodes():
10309                                 if not isinstance(node, Package) or \
10310                                         (node.installed and node.operation == "nomerge") or \
10311                                         node.onlydeps or \
10312                                         node in completed_tasks:
10313                                         removed_nodes.add(node)
10314                         if removed_nodes:
10315                                 graph.difference_update(removed_nodes)
10316                         if not removed_nodes:
10317                                 break
10318                         removed_nodes.clear()
10319
10320         def _prevent_builddir_collisions(self):
10321                 """
10322                 When building stages, sometimes the same exact cpv needs to be merged
10323                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10324                 in the builddir. Currently, normal file locks would be inappropriate
10325                 for this purpose since emerge holds all of its build dir locks from
10326                 the main process.
10327                 """
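                      # Group mergelist entries by cpv; when the same cpv appears more than
                      # once, add buildtime edges so the duplicate instances are not built
                      # concurrently in the same builddir.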
10328                 cpv_map = {}
10329                 for pkg in self._mergelist:
10330                         if not isinstance(pkg, Package):
10331                                 # a satisfied blocker
10332                                 continue
10333                         if pkg.installed:
10334                                 continue
10335                         if pkg.cpv not in cpv_map:
10336                                 cpv_map[pkg.cpv] = [pkg]
10337                                 continue
10338                         for earlier_pkg in cpv_map[pkg.cpv]:
10339                                 self._digraph.add(earlier_pkg, pkg,
10340                                         priority=DepPriority(buildtime=True))
10341                         cpv_map[pkg.cpv].append(pkg)
10342
10343         class _pkg_failure(portage.exception.PortageException):
10344                 """
10345                 An instance of this class is raised by unmerge() when
10346                 an uninstallation fails.
10347                 """
10348                 status = 1
10349                 def __init__(self, *pargs):
10350                         portage.exception.PortageException.__init__(self, pargs)
10351                         if pargs:
10352                                 self.status = pargs[0]
10353
10354         def _schedule_fetch(self, fetcher):
10355                 """
10356                 Schedule a fetcher on the fetch queue, in order to
10357                 serialize access to the fetch log.
10358                 """
10359                 self._task_queues.fetch.addFront(fetcher)
10360
10361         def _schedule_setup(self, setup_phase):
10362                 """
10363                 Schedule a setup phase on the merge queue, in order to
10364                 serialize unsandboxed access to the live filesystem.
10365                 """
10366                 self._task_queues.merge.addFront(setup_phase)
10367                 self._schedule()
10368
10369         def _schedule_unpack(self, unpack_phase):
10370                 """
10371                 Schedule an unpack phase on the unpack queue, in order
10372                 to serialize $DISTDIR access for live ebuilds.
10373                 """
10374                 self._task_queues.unpack.add(unpack_phase)
10375
10376         def _find_blockers(self, new_pkg):
10377                 """
10378                 Returns a callable which should be called only when
10379                 the vdb lock has been acquired.
10380                 """
10381                 def get_blockers():
10382                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10383                 return get_blockers
10384
10385         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10386                 if self._opts_ignore_blockers.intersection(self.myopts):
10387                         return None
10388
10389                 # Call gc.collect() here to avoid heap overflow that
10390                 # triggers 'Cannot allocate memory' errors (reported
10391                 # with python-2.5).
10392                 import gc
10393                 gc.collect()
10394
10395                 blocker_db = self._blocker_db[new_pkg.root]
10396
10397                 blocker_dblinks = []
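                      # Installed blockers that share the new package's slot or cpv are
                      # skipped here, presumably because the merge itself replaces them
                      # rather than unmerging them as blockers.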
10398                 for blocking_pkg in blocker_db.findInstalledBlockers(
10399                         new_pkg, acquire_lock=acquire_lock):
10400                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10401                                 continue
10402                         if new_pkg.cpv == blocking_pkg.cpv:
10403                                 continue
10404                         blocker_dblinks.append(portage.dblink(
10405                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10406                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10407                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10408
10409                 gc.collect()
10410
10411                 return blocker_dblinks
10412
10413         def _dblink_pkg(self, pkg_dblink):
10414                 cpv = pkg_dblink.mycpv
10415                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10416                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10417                 installed = type_name == "installed"
10418                 return self._pkg(cpv, type_name, root_config, installed=installed)
10419
10420         def _append_to_log_path(self, log_path, msg):
10421                 f = open(log_path, 'a')
10422                 try:
10423                         f.write(msg)
10424                 finally:
10425                         f.close()
10426
10427         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10428
10429                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10430                 log_file = None
10431                 out = sys.stdout
10432                 background = self._background
10433
10434                 if background and log_path is not None:
10435                         log_file = open(log_path, 'a')
10436                         out = log_file
10437
10438                 try:
10439                         for msg in msgs:
10440                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10441                 finally:
10442                         if log_file is not None:
10443                                 log_file.close()
10444
10445         def _dblink_emerge_log(self, msg):
10446                 self._logger.log(msg)
10447
10448         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10449                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10450                 background = self._background
10451
10452                 if log_path is None:
10453                         if not (background and level < logging.WARN):
10454                                 portage.util.writemsg_level(msg,
10455                                         level=level, noiselevel=noiselevel)
10456                 else:
10457                         if not background:
10458                                 portage.util.writemsg_level(msg,
10459                                         level=level, noiselevel=noiselevel)
10460                         self._append_to_log_path(log_path, msg)
10461
10462         def _dblink_ebuild_phase(self,
10463                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10464                 """
10465                 Using this callback for merge phases allows the scheduler
10466                 to run while these phases execute asynchronously, and allows
10467                 the scheduler to control output handling.
10468                 """
10469
10470                 scheduler = self._sched_iface
10471                 settings = pkg_dblink.settings
10472                 pkg = self._dblink_pkg(pkg_dblink)
10473                 background = self._background
10474                 log_path = settings.get("PORTAGE_LOG_FILE")
10475
10476                 ebuild_phase = EbuildPhase(background=background,
10477                         pkg=pkg, phase=phase, scheduler=scheduler,
10478                         settings=settings, tree=pkg_dblink.treetype)
10479                 ebuild_phase.start()
10480                 ebuild_phase.wait()
10481
10482                 return ebuild_phase.returncode
10483
10484         def _generate_digests(self):
10485                 """
10486                 Generate digests if necessary for --digest or FEATURES=digest.
10487                 In order to avoid interference, this must be done before parallel
10488                 tasks are started.
10489                 """
10490
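                      # Digests are only (re)generated when --digest was given or
                      # FEATURES=digest is enabled for some root; --fetchonly skips this
                      # step entirely.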
10491                 if '--fetchonly' in self.myopts:
10492                         return os.EX_OK
10493
10494                 digest = '--digest' in self.myopts
10495                 if not digest:
10496                         for pkgsettings in self.pkgsettings.itervalues():
10497                                 if 'digest' in pkgsettings.features:
10498                                         digest = True
10499                                         break
10500
10501                 if not digest:
10502                         return os.EX_OK
10503
10504                 for x in self._mergelist:
10505                         if not isinstance(x, Package) or \
10506                                 x.type_name != 'ebuild' or \
10507                                 x.operation != 'merge':
10508                                 continue
10509                         pkgsettings = self.pkgsettings[x.root]
10510                         if '--digest' not in self.myopts and \
10511                                 'digest' not in pkgsettings.features:
10512                                 continue
10513                         portdb = x.root_config.trees['porttree'].dbapi
10514                         ebuild_path = portdb.findname(x.cpv)
10515                         if not ebuild_path:
10516                                 writemsg_level(
10517                                         "!!! Could not locate ebuild for '%s'.\n" \
10518                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10519                                 return 1
10520                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10521                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10522                                 writemsg_level(
10523                                         "!!! Unable to generate manifest for '%s'.\n" \
10524                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10525                                 return 1
10526
10527                 return os.EX_OK
10528
10529         def _check_manifests(self):
10530                 # Verify all the manifests now so that the user is notified of failure
10531                 # as soon as possible.
10532                 if "strict" not in self.settings.features or \
10533                         "--fetchonly" in self.myopts or \
10534                         "--fetch-all-uri" in self.myopts:
10535                         return os.EX_OK
10536
10537                 shown_verifying_msg = False
10538                 quiet_settings = {}
10539                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10540                         quiet_config = portage.config(clone=pkgsettings)
10541                         quiet_config["PORTAGE_QUIET"] = "1"
10542                         quiet_config.backup_changes("PORTAGE_QUIET")
10543                         quiet_settings[myroot] = quiet_config
10544                         del quiet_config
10545
10546                 for x in self._mergelist:
10547                         if not isinstance(x, Package) or \
10548                                 x.type_name != "ebuild":
10549                                 continue
10550
10551                         if not shown_verifying_msg:
10552                                 shown_verifying_msg = True
10553                                 self._status_msg("Verifying ebuild manifests")
10554
10555                         root_config = x.root_config
10556                         portdb = root_config.trees["porttree"].dbapi
10557                         quiet_config = quiet_settings[root_config.root]
10558                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10559                         if not portage.digestcheck([], quiet_config, strict=True):
10560                                 return 1
10561
10562                 return os.EX_OK
10563
10564         def _add_prefetchers(self):
10565
10566                 if not self._parallel_fetch:
10567                         return
10568
10569                 if self._parallel_fetch:
10570                         self._status_msg("Starting parallel fetch")
10571
10572                         prefetchers = self._prefetchers
10573                         getbinpkg = "--getbinpkg" in self.myopts
10574
10575                         # In order to avoid "waiting for lock" messages
10576                         # at the beginning, which annoy users, never
10577                         # spawn a prefetcher for the first package.
10578                         for pkg in self._mergelist[1:]:
10579                                 prefetcher = self._create_prefetcher(pkg)
10580                                 if prefetcher is not None:
10581                                         self._task_queues.fetch.add(prefetcher)
10582                                         prefetchers[pkg] = prefetcher
10583
10584         def _create_prefetcher(self, pkg):
10585                 """
10586                 @return: a prefetcher, or None if not applicable
10587                 """
10588                 prefetcher = None
10589
10590                 if not isinstance(pkg, Package):
10591                         pass
10592
10593                 elif pkg.type_name == "ebuild":
10594
10595                         prefetcher = EbuildFetcher(background=True,
10596                                 config_pool=self._ConfigPool(pkg.root,
10597                                 self._allocate_config, self._deallocate_config),
10598                                 fetchonly=1, logfile=self._fetch_log,
10599                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10600
10601                 elif pkg.type_name == "binary" and \
10602                         "--getbinpkg" in self.myopts and \
10603                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10604
10605                         prefetcher = BinpkgPrefetcher(background=True,
10606                                 pkg=pkg, scheduler=self._sched_iface)
10607
10608                 return prefetcher
10609
10610         def _is_restart_scheduled(self):
10611                 """
10612                 Check if the merge list contains a replacement
10613                 for the currently running instance that will result
10614                 in a restart after the merge.
10615                 @rtype: bool
10616                 @returns: True if a restart is scheduled, False otherwise.
10617                 """
10618                 if self._opts_no_restart.intersection(self.myopts):
10619                         return False
10620
10621                 mergelist = self._mergelist
10622
10623                 for i, pkg in enumerate(mergelist):
10624                         if self._is_restart_necessary(pkg) and \
10625                                 i != len(mergelist) - 1:
10626                                 return True
10627
10628                 return False
10629
10630         def _is_restart_necessary(self, pkg):
10631                 """
10632                 @return: True if merging the given package
10633                         requires a restart, False otherwise.
10634                 """
10635
10636                 # Figure out if we need a restart.
10637                 if pkg.root == self._running_root.root and \
10638                         portage.match_from_list(
10639                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10640                         if self._running_portage:
10641                                 return pkg.cpv != self._running_portage.cpv
10642                         return True
10643                 return False
10644
10645         def _restart_if_necessary(self, pkg):
10646                 """
10647                 Use execv() to restart emerge. This happens
10648                 if portage upgrades itself and there are
10649                 remaining packages in the list.
10650                 """
10651
10652                 if self._opts_no_restart.intersection(self.myopts):
10653                         return
10654
10655                 if not self._is_restart_necessary(pkg):
10656                         return
10657
10658                 if pkg == self._mergelist[-1]:
10659                         return
10660
10661                 self._main_loop_cleanup()
10662
10663                 logger = self._logger
10664                 pkg_count = self._pkg_count
10665                 mtimedb = self._mtimedb
10666                 bad_resume_opts = self._bad_resume_opts
10667
10668                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10669                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10670
10671                 logger.log(" *** RESTARTING " + \
10672                         "emerge via exec() after change of " + \
10673                         "portage version.")
10674
10675                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10676                 mtimedb.commit()
10677                 portage.run_exitfuncs()
10678                 mynewargv = [sys.argv[0], "--resume"]
10679                 resume_opts = self.myopts.copy()
10680                 # For automatic resume, we need to prevent
10681                 # any of bad_resume_opts from leaking in
10682                 # via EMERGE_DEFAULT_OPTS.
10683                 resume_opts["--ignore-default-opts"] = True
10684                 for myopt, myarg in resume_opts.iteritems():
10685                         if myopt not in bad_resume_opts:
10686                                 if myarg is True:
10687                                         mynewargv.append(myopt)
10688                                 else:
10689                                         mynewargv.append(myopt +"="+ str(myarg))
10690                 # priority only needs to be adjusted on the first run
10691                 os.environ["PORTAGE_NICENESS"] = "0"
10692                 os.execv(mynewargv[0], mynewargv)
10693
10694         def merge(self):
10695
10696                 if "--resume" in self.myopts:
10697                         # We're resuming.
10698                         portage.writemsg_stdout(
10699                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10700                         self._logger.log(" *** Resuming merge...")
10701
10702                 self._save_resume_list()
10703
10704                 try:
10705                         self._background = self._background_mode()
10706                 except self._unknown_internal_error:
10707                         return 1
10708
10709                 for root in self.trees:
10710                         root_config = self.trees[root]["root_config"]
10711
10712                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10713                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10714                         # to ensure a sane $PWD (bug #239560) and to store elog messages.
10715                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10716                         if not tmpdir or not os.path.isdir(tmpdir):
10717                                 msg = "The directory specified in your " + \
10718                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10719                                         "does not exist. Please create this " + \
10720                                         "directory or correct your PORTAGE_TMPDIR setting."
10721                                 msg = textwrap.wrap(msg, 70)
10722                                 out = portage.output.EOutput()
10723                                 for l in msg:
10724                                         out.eerror(l)
10725                                 return 1
10726
10727                         if self._background:
10728                                 root_config.settings.unlock()
10729                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10730                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10731                                 root_config.settings.lock()
10732
10733                         self.pkgsettings[root] = portage.config(
10734                                 clone=root_config.settings)
10735
10736                 rval = self._generate_digests()
10737                 if rval != os.EX_OK:
10738                         return rval
10739
10740                 rval = self._check_manifests()
10741                 if rval != os.EX_OK:
10742                         return rval
10743
10744                 keep_going = "--keep-going" in self.myopts
10745                 fetchonly = self._build_opts.fetchonly
10746                 mtimedb = self._mtimedb
10747                 failed_pkgs = self._failed_pkgs
10748
10749                 while True:
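                      # With --keep-going, failed packages are pruned from the resume
                      # mergelist and the remaining list is recalculated and retried until
                      # it is exhausted or the recalculation fails.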
10750                         rval = self._merge()
10751                         if rval == os.EX_OK or fetchonly or not keep_going:
10752                                 break
10753                         if "resume" not in mtimedb:
10754                                 break
10755                         mergelist = self._mtimedb["resume"].get("mergelist")
10756                         if not mergelist:
10757                                 break
10758
10759                         if not failed_pkgs:
10760                                 break
10761
10762                         for failed_pkg in failed_pkgs:
10763                                 mergelist.remove(list(failed_pkg.pkg))
10764
10765                         self._failed_pkgs_all.extend(failed_pkgs)
10766                         del failed_pkgs[:]
10767
10768                         if not mergelist:
10769                                 break
10770
10771                         if not self._calc_resume_list():
10772                                 break
10773
10774                         clear_caches(self.trees)
10775                         if not self._mergelist:
10776                                 break
10777
10778                         self._save_resume_list()
10779                         self._pkg_count.curval = 0
10780                         self._pkg_count.maxval = len([x for x in self._mergelist \
10781                                 if isinstance(x, Package) and x.operation == "merge"])
10782                         self._status_display.maxval = self._pkg_count.maxval
10783
10784                 self._logger.log(" *** Finished. Cleaning up...")
10785
10786                 if failed_pkgs:
10787                         self._failed_pkgs_all.extend(failed_pkgs)
10788                         del failed_pkgs[:]
10789
10790                 background = self._background
10791                 failure_log_shown = False
10792                 if background and len(self._failed_pkgs_all) == 1:
10793                         # If only one package failed then just show its
10794                         # whole log for easy viewing.
10795                         failed_pkg = self._failed_pkgs_all[-1]
10796                         build_dir = failed_pkg.build_dir
10797                         log_file = None
10798
10799                         log_paths = [failed_pkg.build_log]
10800
10801                         log_path = self._locate_failure_log(failed_pkg)
10802                         if log_path is not None:
10803                                 try:
10804                                         log_file = open(log_path)
10805                                 except IOError:
10806                                         pass
10807
10808                         if log_file is not None:
10809                                 try:
10810                                         for line in log_file:
10811                                                 writemsg_level(line, noiselevel=-1)
10812                                 finally:
10813                                         log_file.close()
10814                                 failure_log_shown = True
10815
10816                 # Dump mod_echo output now since it tends to flood the terminal.
10817                 # This prevents more important output, generated later, from being
10818                 # swept away by the mod_echo output.
10819                 mod_echo_output = _flush_elog_mod_echo()
10820
10821                 if background and not failure_log_shown and \
10822                         self._failed_pkgs_all and \
10823                         self._failed_pkgs_die_msgs and \
10824                         not mod_echo_output:
10825
10826                         printer = portage.output.EOutput()
10827                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10828                                 root_msg = ""
10829                                 if mysettings["ROOT"] != "/":
10830                                         root_msg = " merged to %s" % mysettings["ROOT"]
10831                                 print
10832                                 printer.einfo("Error messages for package %s%s:" % \
10833                                         (colorize("INFORM", key), root_msg))
10834                                 print
10835                                 for phase in portage.const.EBUILD_PHASES:
10836                                         if phase not in logentries:
10837                                                 continue
10838                                         for msgtype, msgcontent in logentries[phase]:
10839                                                 if isinstance(msgcontent, basestring):
10840                                                         msgcontent = [msgcontent]
10841                                                 for line in msgcontent:
10842                                                         printer.eerror(line.strip("\n"))
10843
10844                 if self._post_mod_echo_msgs:
10845                         for msg in self._post_mod_echo_msgs:
10846                                 msg()
10847
10848                 if len(self._failed_pkgs_all) > 1 or \
10849                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10850                         if len(self._failed_pkgs_all) > 1:
10851                                 msg = "The following %d packages have " % \
10852                                         len(self._failed_pkgs_all) + \
10853                                         "failed to build or install:"
10854                         else:
10855                                 msg = "The following package has " + \
10856                                         "failed to build or install:"
10857                         prefix = bad(" * ")
10858                         writemsg(prefix + "\n", noiselevel=-1)
10859                         from textwrap import wrap
10860                         for line in wrap(msg, 72):
10861                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10862                         writemsg(prefix + "\n", noiselevel=-1)
10863                         for failed_pkg in self._failed_pkgs_all:
10864                                 writemsg("%s\t%s\n" % (prefix,
10865                                         colorize("INFORM", str(failed_pkg.pkg))),
10866                                         noiselevel=-1)
10867                         writemsg(prefix + "\n", noiselevel=-1)
10868
10869                 return rval
10870
10871         def _elog_listener(self, mysettings, key, logentries, fulltext):
10872                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10873                 if errors:
10874                         self._failed_pkgs_die_msgs.append(
10875                                 (mysettings, key, errors))
10876
10877         def _locate_failure_log(self, failed_pkg):
10878
10879                 build_dir = failed_pkg.build_dir
10880                 log_file = None
10881
10882                 log_paths = [failed_pkg.build_log]
10883
10884                 for log_path in log_paths:
10885                         if not log_path:
10886                                 continue
10887
10888                         try:
10889                                 log_size = os.stat(log_path).st_size
10890                         except OSError:
10891                                 continue
10892
10893                         if log_size == 0:
10894                                 continue
10895
10896                         return log_path
10897
10898                 return None
10899
10900         def _add_packages(self):
10901                 pkg_queue = self._pkg_queue
10902                 for pkg in self._mergelist:
10903                         if isinstance(pkg, Package):
10904                                 pkg_queue.append(pkg)
10905                         elif isinstance(pkg, Blocker):
10906                                 pass
10907
10908         def _system_merge_started(self, merge):
10909                 """
10910                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10911                 """
10912                 graph = self._digraph
10913                 if graph is None:
10914                         return
10915                 pkg = merge.merge.pkg
10916
10917                 # Skip this if $ROOT != / since it shouldn't matter if there
10918                 # are unsatisfied system runtime deps in this case.
10919                 if pkg.root != '/':
10920                         return
10921
10922                 completed_tasks = self._completed_tasks
10923                 unsatisfied = self._unsatisfied_system_deps
10924
10925                 def ignore_non_runtime_or_satisfied(priority):
10926                         """
10927                         Ignore non-runtime and satisfied runtime priorities.
10928                         """
10929                         if isinstance(priority, DepPriority) and \
10930                                 not priority.satisfied and \
10931                                 (priority.runtime or priority.runtime_post):
10932                                 return False
10933                         return True
10934
10935                 # When checking for unsatisfied runtime deps, only check
10936                 # direct deps since indirect deps are checked when the
10937                 # corresponding parent is merged.
10938                 for child in graph.child_nodes(pkg,
10939                         ignore_priority=ignore_non_runtime_or_satisfied):
10940                         if not isinstance(child, Package) or \
10941                                 child.operation == 'uninstall':
10942                                 continue
10943                         if child is pkg:
10944                                 continue
10945                         if child.operation == 'merge' and \
10946                                 child not in completed_tasks:
10947                                 unsatisfied.add(child)
10948
10949         def _merge_wait_exit_handler(self, task):
10950                 self._merge_wait_scheduled.remove(task)
10951                 self._merge_exit(task)
10952
10953         def _merge_exit(self, merge):
10954                 self._do_merge_exit(merge)
10955                 self._deallocate_config(merge.merge.settings)
10956                 if merge.returncode == os.EX_OK and \
10957                         not merge.merge.pkg.installed:
10958                         self._status_display.curval += 1
10959                 self._status_display.merges = len(self._task_queues.merge)
10960                 self._schedule()
10961
10962         def _do_merge_exit(self, merge):
10963                 pkg = merge.merge.pkg
10964                 if merge.returncode != os.EX_OK:
10965                         settings = merge.merge.settings
10966                         build_dir = settings.get("PORTAGE_BUILDDIR")
10967                         build_log = settings.get("PORTAGE_LOG_FILE")
10968
10969                         self._failed_pkgs.append(self._failed_pkg(
10970                                 build_dir=build_dir, build_log=build_log,
10971                                 pkg=pkg,
10972                                 returncode=merge.returncode))
10973                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10974
10975                         self._status_display.failed = len(self._failed_pkgs)
10976                         return
10977
10978                 self._task_complete(pkg)
10979                 pkg_to_replace = merge.merge.pkg_to_replace
10980                 if pkg_to_replace is not None:
10981                         # When a package is replaced, mark its uninstall
10982                         # task complete (if any).
10983                         uninst_hash_key = \
10984                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10985                         self._task_complete(uninst_hash_key)
10986
10987                 if pkg.installed:
10988                         return
10989
10990                 self._restart_if_necessary(pkg)
10991
10992                 # Call mtimedb.commit() after each merge so that
10993                 # --resume still works after being interrupted
10994                 # by reboot, sigkill or similar.
10995                 mtimedb = self._mtimedb
10996                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10997                 if not mtimedb["resume"]["mergelist"]:
10998                         del mtimedb["resume"]
10999                 mtimedb.commit()
11000
11001         def _build_exit(self, build):
11002                 if build.returncode == os.EX_OK:
11003                         self.curval += 1
11004                         merge = PackageMerge(merge=build)
11005                         if not build.build_opts.buildpkgonly and \
11006                                 build.pkg in self._deep_system_deps:
11007                                 # Since dependencies on system packages are frequently
11008                                 # unspecified, merge them only when no builds are executing.
11009                                 self._merge_wait_queue.append(merge)
11010                                 merge.addStartListener(self._system_merge_started)
11011                         else:
11012                                 merge.addExitListener(self._merge_exit)
11013                                 self._task_queues.merge.add(merge)
11014                                 self._status_display.merges = len(self._task_queues.merge)
11015                 else:
11016                         settings = build.settings
11017                         build_dir = settings.get("PORTAGE_BUILDDIR")
11018                         build_log = settings.get("PORTAGE_LOG_FILE")
11019
11020                         self._failed_pkgs.append(self._failed_pkg(
11021                                 build_dir=build_dir, build_log=build_log,
11022                                 pkg=build.pkg,
11023                                 returncode=build.returncode))
11024                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11025
11026                         self._status_display.failed = len(self._failed_pkgs)
11027                         self._deallocate_config(build.settings)
11028                 self._jobs -= 1
11029                 self._status_display.running = self._jobs
11030                 self._schedule()
11031
11032         def _extract_exit(self, build):
11033                 self._build_exit(build)
11034
11035         def _task_complete(self, pkg):
11036                 self._completed_tasks.add(pkg)
11037                 self._unsatisfied_system_deps.discard(pkg)
11038                 self._choose_pkg_return_early = False
11039
11040         def _merge(self):
11041
11042                 self._add_prefetchers()
11043                 self._add_packages()
11044                 pkg_queue = self._pkg_queue
11045                 failed_pkgs = self._failed_pkgs
11046                 portage.locks._quiet = self._background
11047                 portage.elog._emerge_elog_listener = self._elog_listener
11048                 rval = os.EX_OK
11049
11050                 try:
11051                         self._main_loop()
11052                 finally:
11053                         self._main_loop_cleanup()
11054                         portage.locks._quiet = False
11055                         portage.elog._emerge_elog_listener = None
11056                         if failed_pkgs:
11057                                 rval = failed_pkgs[-1].returncode
11058
11059                 return rval
11060
11061         def _main_loop_cleanup(self):
11062                 del self._pkg_queue[:]
11063                 self._completed_tasks.clear()
11064                 self._deep_system_deps.clear()
11065                 self._unsatisfied_system_deps.clear()
11066                 self._choose_pkg_return_early = False
11067                 self._status_display.reset()
11068                 self._digraph = None
11069                 self._task_queues.fetch.clear()
11070
11071         def _choose_pkg(self):
11072                 """
11073                 Choose a task that has all of its dependencies satisfied.
11074                 """
11075
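                      # When the digraph was discarded (--nodeps with parallel jobs), any
                      # queued package may be started right away; otherwise a package is
                      # chosen only when it does not depend on any scheduled merge (or when
                      # nothing is currently running).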
11076                 if self._choose_pkg_return_early:
11077                         return None
11078
11079                 if self._digraph is None:
11080                         if (self._jobs or self._task_queues.merge) and \
11081                                 not ("--nodeps" in self.myopts and \
11082                                 (self._max_jobs is True or self._max_jobs > 1)):
11083                                 self._choose_pkg_return_early = True
11084                                 return None
11085                         return self._pkg_queue.pop(0)
11086
11087                 if not (self._jobs or self._task_queues.merge):
11088                         return self._pkg_queue.pop(0)
11089
11090                 self._prune_digraph()
11091
11092                 chosen_pkg = None
11093                 later = set(self._pkg_queue)
11094                 for pkg in self._pkg_queue:
11095                         later.remove(pkg)
11096                         if not self._dependent_on_scheduled_merges(pkg, later):
11097                                 chosen_pkg = pkg
11098                                 break
11099
11100                 if chosen_pkg is not None:
11101                         self._pkg_queue.remove(chosen_pkg)
11102
11103                 if chosen_pkg is None:
11104                         # There's no point in searching for a package to
11105                         # choose until at least one of the existing jobs
11106                         # completes.
11107                         self._choose_pkg_return_early = True
11108
11109                 return chosen_pkg
11110
11111         def _dependent_on_scheduled_merges(self, pkg, later):
11112                 """
11113                 Traverse the subgraph of the given package's deep dependencies
11114                 to see if it contains any scheduled merges.
11115                 @param pkg: a package to check dependencies for
11116                 @type pkg: Package
11117                 @param later: packages for which dependence should be ignored
11118                         since they will be merged later than pkg anyway and therefore
11119                         delaying the merge of pkg will not result in a more optimal
11120                         merge order
11121                 @type later: set
11122                 @rtype: bool
11123                 @returns: True if the package is dependent, False otherwise.
11124                 """
11125
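                      # Iterative depth-first traversal over pkg's dependency subgraph;
                      # stop as soon as a node whose merge is still pending is found.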
11126                 graph = self._digraph
11127                 completed_tasks = self._completed_tasks
11128
11129                 dependent = False
11130                 traversed_nodes = set([pkg])
11131                 direct_deps = graph.child_nodes(pkg)
11132                 node_stack = direct_deps
11133                 direct_deps = frozenset(direct_deps)
11134                 while node_stack:
11135                         node = node_stack.pop()
11136                         if node in traversed_nodes:
11137                                 continue
11138                         traversed_nodes.add(node)
11139                         if not ((node.installed and node.operation == "nomerge") or \
11140                                 (node.operation == "uninstall" and \
11141                                 node not in direct_deps) or \
11142                                 node in completed_tasks or \
11143                                 node in later):
11144                                 dependent = True
11145                                 break
11146                         node_stack.extend(graph.child_nodes(node))
11147
11148                 return dependent
11149
11150         def _allocate_config(self, root):
11151                 """
11152                 Allocate a unique config instance for a task in order
11153                 to prevent interference between parallel tasks.
11154                 """
11155                 if self._config_pool[root]:
11156                         temp_settings = self._config_pool[root].pop()
11157                 else:
11158                         temp_settings = portage.config(clone=self.pkgsettings[root])
11159                 # Since config.setcpv() isn't guaranteed to call config.reset() (for
11160                 # performance reasons), call it here to make sure all settings from the
11161                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11162                 temp_settings.reload()
11163                 temp_settings.reset()
11164                 return temp_settings
11165
11166         def _deallocate_config(self, settings):
11167                 self._config_pool[settings["ROOT"]].append(settings)
11168
11169         def _main_loop(self):
11170
11171                 # Only allow 1 job max if a restart is scheduled
11172                 # due to portage update.
11173                 if self._is_restart_scheduled() or \
11174                         self._opts_no_background.intersection(self.myopts):
11175                         self._set_max_jobs(1)
11176
11177                 merge_queue = self._task_queues.merge
11178
11179                 while self._schedule():
11180                         if self._poll_event_handlers:
11181                                 self._poll_loop()
11182
11183                 while True:
11184                         self._schedule()
11185                         if not (self._jobs or merge_queue):
11186                                 break
11187                         if self._poll_event_handlers:
11188                                 self._poll_loop()
11189
11190         def _keep_scheduling(self):
11191                 return bool(self._pkg_queue and \
11192                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11193
11194         def _schedule_tasks(self):
11195
11196                 # When the number of jobs drops to zero, process all waiting merges.
11197                 if not self._jobs and self._merge_wait_queue:
11198                         for task in self._merge_wait_queue:
11199                                 task.addExitListener(self._merge_wait_exit_handler)
11200                                 self._task_queues.merge.add(task)
11201                         self._status_display.merges = len(self._task_queues.merge)
11202                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11203                         del self._merge_wait_queue[:]
11204
11205                 self._schedule_tasks_imp()
11206                 self._status_display.display()
11207
11208                 state_change = 0
11209                 for q in self._task_queues.values():
11210                         if q.schedule():
11211                                 state_change += 1
11212
11213                 # Cancel prefetchers if they're the only reason
11214                 # the main poll loop is still running.
11215                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11216                         not (self._jobs or self._task_queues.merge) and \
11217                         self._task_queues.fetch:
11218                         self._task_queues.fetch.clear()
11219                         state_change += 1
11220
11221                 if state_change:
11222                         self._schedule_tasks_imp()
11223                         self._status_display.display()
11224
11225                 return self._keep_scheduling()
11226
11227         def _job_delay(self):
11228                 """
11229                 @rtype: bool
11230                 @returns: True if job scheduling should be delayed, False otherwise.
11231                 """
11232
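                      # Illustrative numbers with the defaults set in __init__ (factor 1.0,
                      # exponent 1.5, cap 10): four running jobs give
                      #   min(10, 1.0 * 4 ** 1.5) = 8.0 seconds
                      # since the previous job start. The delay only applies while
                      # self._max_load is set (i.e. a load limit is in effect).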
11233                 if self._jobs and self._max_load is not None:
11234
11235                         current_time = time.time()
11236
11237                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11238                         if delay > self._job_delay_max:
11239                                 delay = self._job_delay_max
11240                         if (current_time - self._previous_job_start_time) < delay:
11241                                 return True
11242
11243                 return False
11244
11245         def _schedule_tasks_imp(self):
11246                 """
11247                 @rtype: bool
11248                 @returns: True if state changed, False otherwise.
11249                 """
11250
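                      # Keep starting tasks until nothing more can be scheduled: installed
                      # packages go straight onto the merge queue, built packages are queued
                      # for extraction, and everything else becomes a build job.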
11251                 state_change = 0
11252
11253                 while True:
11254
11255                         if not self._keep_scheduling():
11256                                 return bool(state_change)
11257
11258                         if self._choose_pkg_return_early or \
11259                                 self._merge_wait_scheduled or \
11260                                 (self._jobs and self._unsatisfied_system_deps) or \
11261                                 not self._can_add_job() or \
11262                                 self._job_delay():
11263                                 return bool(state_change)
11264
11265                         pkg = self._choose_pkg()
11266                         if pkg is None:
11267                                 return bool(state_change)
11268
11269                         state_change += 1
11270
11271                         if not pkg.installed:
11272                                 self._pkg_count.curval += 1
11273
11274                         task = self._task(pkg)
11275
11276                         if pkg.installed:
11277                                 merge = PackageMerge(merge=task)
11278                                 merge.addExitListener(self._merge_exit)
11279                                 self._task_queues.merge.add(merge)
11280
11281                         elif pkg.built:
11282                                 self._jobs += 1
11283                                 self._previous_job_start_time = time.time()
11284                                 self._status_display.running = self._jobs
11285                                 task.addExitListener(self._extract_exit)
11286                                 self._task_queues.jobs.add(task)
11287
11288                         else:
11289                                 self._jobs += 1
11290                                 self._previous_job_start_time = time.time()
11291                                 self._status_display.running = self._jobs
11292                                 task.addExitListener(self._build_exit)
11293                                 self._task_queues.jobs.add(task)
11294
11295                 return bool(state_change)
11296
11297         def _task(self, pkg):
11298
11299                 pkg_to_replace = None
11300                 if pkg.operation != "uninstall":
11301                         vardb = pkg.root_config.trees["vartree"].dbapi
11302                         previous_cpv = vardb.match(pkg.slot_atom)
11303                         if previous_cpv:
11304                                 previous_cpv = previous_cpv.pop()
11305                                 pkg_to_replace = self._pkg(previous_cpv,
11306                                         "installed", pkg.root_config, installed=True)
11307
11308                 task = MergeListItem(args_set=self._args_set,
11309                         background=self._background, binpkg_opts=self._binpkg_opts,
11310                         build_opts=self._build_opts,
11311                         config_pool=self._ConfigPool(pkg.root,
11312                         self._allocate_config, self._deallocate_config),
11313                         emerge_opts=self.myopts,
11314                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11315                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11316                         pkg_to_replace=pkg_to_replace,
11317                         prefetcher=self._prefetchers.get(pkg),
11318                         scheduler=self._sched_iface,
11319                         settings=self._allocate_config(pkg.root),
11320                         statusMessage=self._status_msg,
11321                         world_atom=self._world_atom)
11322
11323                 return task
11324
11325         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11326                 pkg = failed_pkg.pkg
11327                 msg = "%s to %s %s" % \
11328                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11329                 if pkg.root != "/":
11330                         msg += " %s %s" % (preposition, pkg.root)
11331
11332                 log_path = self._locate_failure_log(failed_pkg)
11333                 if log_path is not None:
11334                         msg += ", Log file:"
11335                 self._status_msg(msg)
11336
11337                 if log_path is not None:
11338                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11339
11340         def _status_msg(self, msg):
11341                 """
11342                 Display a brief status message (no newlines) in the status display.
11343                 This is called by tasks to provide feedback to the user. This
11344                 delegates the responsibility of generating \r and \n control
11345                 characters to the status display, so that lines are created or
11346                 erased when necessary and appropriate.
11347
11348                 @type msg: str
11349                 @param msg: a brief status message (no newlines allowed)
11350                 """
11351                 if not self._background:
11352                         writemsg_level("\n")
11353                 self._status_display.displayMessage(msg)
11354
11355         def _save_resume_list(self):
11356                 """
11357                 Do this before verifying the ebuild Manifests since it might
11358                 be possible for the user to use --resume --skipfirst to get past
11359                 a non-essential package with a broken digest.
11360                 """
11361                 mtimedb = self._mtimedb
11362                 mtimedb["resume"]["mergelist"] = [list(x) \
11363                         for x in self._mergelist \
11364                         if isinstance(x, Package) and x.operation == "merge"]
11365
11366                 mtimedb.commit()
11367
11368         def _calc_resume_list(self):
11369                 """
11370                 Use the current resume list to calculate a new one,
11371                 dropping any packages with unsatisfied deps.
11372                 @rtype: bool
11373                 @returns: True if successful, False otherwise.
11374                 """
11375                 print colorize("GOOD", "*** Resuming merge...")
11376
11377                 if self._show_list():
11378                         if "--tree" in self.myopts:
11379                                 portage.writemsg_stdout("\n" + \
11380                                         darkgreen("These are the packages that " + \
11381                                         "would be merged, in reverse order:\n\n"))
11382
11383                         else:
11384                                 portage.writemsg_stdout("\n" + \
11385                                         darkgreen("These are the packages that " + \
11386                                         "would be merged, in order:\n\n"))
11387
11388                 show_spinner = "--quiet" not in self.myopts and \
11389                         "--nodeps" not in self.myopts
11390
11391                 if show_spinner:
11392                         print "Calculating dependencies  ",
11393
11394                 myparams = create_depgraph_params(self.myopts, None)
11395                 success = False
11396                 e = None
11397                 try:
11398                         success, mydepgraph, dropped_tasks = resume_depgraph(
11399                                 self.settings, self.trees, self._mtimedb, self.myopts,
11400                                 myparams, self._spinner)
11401                 except depgraph.UnsatisfiedResumeDep, exc:
11402                         # rename variable to avoid python-3.0 error:
11403                         # SyntaxError: can not delete variable 'e' referenced in nested
11404                         #              scope
11405                         e = exc
11406                         mydepgraph = e.depgraph
11407                         dropped_tasks = set()
11408
11409                 if show_spinner:
11410                         print "\b\b... done!"
11411
11412                 if e is not None:
11413                         def unsatisfied_resume_dep_msg():
11414                                 mydepgraph.display_problems()
11415                                 out = portage.output.EOutput()
11416                                 out.eerror("One or more packages are either masked or " + \
11417                                         "have missing dependencies:")
11418                                 out.eerror("")
11419                                 indent = "  "
11420                                 show_parents = set()
11421                                 for dep in e.value:
11422                                         if dep.parent in show_parents:
11423                                                 continue
11424                                         show_parents.add(dep.parent)
11425                                         if dep.atom is None:
11426                                                 out.eerror(indent + "Masked package:")
11427                                                 out.eerror(2 * indent + str(dep.parent))
11428                                                 out.eerror("")
11429                                         else:
11430                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11431                                                 out.eerror(2 * indent + str(dep.parent))
11432                                                 out.eerror("")
11433                                 msg = "The resume list contains packages " + \
11434                                         "that are either masked or have " + \
11435                                         "unsatisfied dependencies. " + \
11436                                         "Please restart/continue " + \
11437                                         "the operation manually, or use --skipfirst " + \
11438                                         "to skip the first package in the list and " + \
11439                                         "any other packages that may be " + \
11440                                         "masked or have missing dependencies."
11441                                 for line in textwrap.wrap(msg, 72):
11442                                         out.eerror(line)
11443                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11444                         return False
11445
11446                 if success and self._show_list():
11447                         mylist = mydepgraph.altlist()
11448                         if mylist:
11449                                 if "--tree" in self.myopts:
11450                                         mylist.reverse()
11451                                 mydepgraph.display(mylist, favorites=self._favorites)
11452
11453                 if not success:
11454                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11455                         return False
11456                 mydepgraph.display_problems()
11457
11458                 mylist = mydepgraph.altlist()
11459                 mydepgraph.break_refs(mylist)
11460                 mydepgraph.break_refs(dropped_tasks)
11461                 self._mergelist = mylist
11462                 self._set_digraph(mydepgraph.schedulerGraph())
11463
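                      # Report each merge task that was dropped from the resume list
                      # due to an unsatisfied dependency, both through elog and in the
                      # --keep-going failure summary, so the user can see why it was
                      # skipped.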
11464                 msg_width = 75
11465                 for task in dropped_tasks:
11466                         if not (isinstance(task, Package) and task.operation == "merge"):
11467                                 continue
11468                         pkg = task
11469                         msg = "emerge --keep-going:" + \
11470                                 " %s" % (pkg.cpv,)
11471                         if pkg.root != "/":
11472                                 msg += " for %s" % (pkg.root,)
11473                         msg += " dropped due to unsatisfied dependency."
11474                         for line in textwrap.wrap(msg, msg_width):
11475                                 eerror(line, phase="other", key=pkg.cpv)
11476                         settings = self.pkgsettings[pkg.root]
11477                         # Ensure that log collection from $T is disabled inside
11478                         # elog_process(), since any logs that might exist are
11479                         # not valid here.
11480                         settings.pop("T", None)
11481                         portage.elog.elog_process(pkg.cpv, settings)
11482                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11483
11484                 return True
11485
11486         def _show_list(self):
11487                 myopts = self.myopts
11488                 if "--quiet" not in myopts and \
11489                         ("--ask" in myopts or "--tree" in myopts or \
11490                         "--verbose" in myopts):
11491                         return True
11492                 return False
11493
11494         def _world_atom(self, pkg):
11495                 """
11496                 Add the package to the world file, but only if
11497                 it's supposed to be added. Otherwise, do nothing.
11498                 """
11499
11500                 if set(("--buildpkgonly", "--fetchonly",
11501                         "--fetch-all-uri",
11502                         "--oneshot", "--onlydeps",
11503                         "--pretend")).intersection(self.myopts):
11504                         return
11505
11506                 if pkg.root != self.target_root:
11507                         return
11508
11509                 args_set = self._args_set
11510                 if not args_set.findAtomForPackage(pkg):
11511                         return
11512
11513                 logger = self._logger
11514                 pkg_count = self._pkg_count
11515                 root_config = pkg.root_config
11516                 world_set = root_config.sets["world"]
11517                 world_locked = False
11518                 if hasattr(world_set, "lock"):
11519                         world_set.lock()
11520                         world_locked = True
11521
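                      # With the lock held (when the set supports locking), re-load the
                      # world file before adding the atom, so that additions made by a
                      # concurrent emerge process are not clobbered.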
11522                 try:
11523                         if hasattr(world_set, "load"):
11524                                 world_set.load() # maybe it's changed on disk
11525
11526                         atom = create_world_atom(pkg, args_set, root_config)
11527                         if atom:
11528                                 if hasattr(world_set, "add"):
11529                                         self._status_msg(('Recording %s in "world" ' + \
11530                                                 'favorites file...') % atom)
11531                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11532                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11533                                         world_set.add(atom)
11534                                 else:
11535                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11536                                                 (atom,), level=logging.WARN, noiselevel=-1)
11537                 finally:
11538                         if world_locked:
11539                                 world_set.unlock()
11540
11541         def _pkg(self, cpv, type_name, root_config, installed=False):
11542                 """
11543                 Get a package instance from the cache, or create a new
11544                 one if necessary. Raises KeyError from aux_get if it
11545                 fails for some reason (package does not exist or is
11546                 corrupt).
11547                 """
11548                 operation = "merge"
11549                 if installed:
11550                         operation = "nomerge"
11551
11552                 if self._digraph is not None:
11553                         # Reuse existing instance when available.
11554                         pkg = self._digraph.get(
11555                                 (type_name, root_config.root, cpv, operation))
11556                         if pkg is not None:
11557                                 return pkg
11558
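                      # Not found in the digraph, so construct a fresh Package instance,
                      # fetching only the aux_get keys that this dbapi caches.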
11559                 tree_type = depgraph.pkg_tree_map[type_name]
11560                 db = root_config.trees[tree_type].dbapi
11561                 db_keys = list(self.trees[root_config.root][
11562                         tree_type].dbapi._aux_cache_keys)
11563                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11564                 pkg = Package(cpv=cpv, metadata=metadata,
11565                         root_config=root_config, installed=installed)
11566                 if type_name == "ebuild":
11567                         settings = self.pkgsettings[root_config.root]
11568                         settings.setcpv(pkg)
11569                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11570                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11571
11572                 return pkg
11573
11574 class MetadataRegen(PollScheduler):
11575
11576         def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None):
11577                 PollScheduler.__init__(self)
11578                 self._portdb = portdb
11579                 self._global_cleanse = False
11580                 if cp_iter is None:
11581                         cp_iter = self._iter_every_cp()
11582                         # We can globally cleanse stale cache only if we
11583                         # iterate over every single cp.
11584                         self._global_cleanse = True
11585                 self._cp_iter = cp_iter
11586
11587                 if max_jobs is None:
11588                         max_jobs = 1
11589
11590                 self._max_jobs = max_jobs
11591                 self._max_load = max_load
11592                 self._sched_iface = self._sched_iface_class(
11593                         register=self._register,
11594                         schedule=self._schedule_wait,
11595                         unregister=self._unregister)
11596
11597                 self._valid_pkgs = set()
11598                 self._cp_set = set()
11599                 self._process_iter = self._iter_metadata_processes()
11600                 self.returncode = os.EX_OK
11601                 self._error_count = 0
11602
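              # Rough usage sketch, inferred from this class's interface rather
              # than copied from a caller: construct with a portdbapi instance
              # (and optionally a cp iterator and job/load limits), call run(),
              # then check returncode, which stays os.EX_OK unless some ebuild
              # failed to produce metadata. For example:
              #     regen = MetadataRegen(portdb, max_jobs=jobs, max_load=load)
              #     regen.run()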
11603         def _iter_every_cp(self):
11604                 every_cp = self._portdb.cp_all()
11605                 every_cp.sort(reverse=True)
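                      # Reverse-sorted so that pop() from the end of the list yields
                      # the cps in ascending order while shrinking the list as we go.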
11606                 try:
11607                         while True:
11608                                 yield every_cp.pop()
11609                 except IndexError:
11610                         pass
11611
11612         def _iter_metadata_processes(self):
11613                 portdb = self._portdb
11614                 valid_pkgs = self._valid_pkgs
11615                 cp_set = self._cp_set
11616
11617                 for cp in self._cp_iter:
11618                         cp_set.add(cp)
11619                         portage.writemsg_stdout("Processing %s\n" % cp)
11620                         cpv_list = portdb.cp_list(cp)
11621                         for cpv in cpv_list:
11622                                 valid_pkgs.add(cpv)
11623                                 ebuild_path, repo_path = portdb.findname2(cpv)
11624                                 metadata_process = portdb._metadata_process(
11625                                         cpv, ebuild_path, repo_path)
11626                                 if metadata_process is None:
11627                                         continue
11628                                 yield metadata_process
11629
11630         def run(self):
11631
11632                 portdb = self._portdb
11633                 from portage.cache.cache_errors import CacheError
11634                 dead_nodes = {}
11635
11636                 while self._schedule():
11637                         self._poll_loop()
11638
11639                 while self._jobs:
11640                         self._poll_loop()
11641
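                      # Build the initial set of cache keys that are candidates for
                      # cleansing. When iterating over every cp, the whole auxdb is a
                      # candidate; otherwise only keys belonging to the cps that were
                      # actually processed may be cleansed, leaving entries for other
                      # packages untouched.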
11642                 if self._global_cleanse:
11643                         for mytree in portdb.porttrees:
11644                                 try:
11645                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11646                                 except CacheError, e:
11647                                         portage.writemsg("Error listing cache entries for " + \
11648                                                 "'%s': %s, continuing...\n" % (mytree, e),
11649                                                 noiselevel=-1)
11650                                         del e
11651                                         dead_nodes = None
11652                                         break
11653                 else:
11654                         cp_set = self._cp_set
11655                         cpv_getkey = portage.cpv_getkey
11656                         for mytree in portdb.porttrees:
11657                                 try:
11658                                         dead_nodes[mytree] = set(cpv for cpv in \
11659                                                 portdb.auxdb[mytree].iterkeys() \
11660                                                 if cpv_getkey(cpv) in cp_set)
11661                                 except CacheError, e:
11662                                         portage.writemsg("Error listing cache entries for " + \
11663                                                 "'%s': %s, continuing...\n" % (mytree, e),
11664                                                 noiselevel=-1)
11665                                         del e
11666                                         dead_nodes = None
11667                                         break
11668
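                      # Any cpv for which an ebuild still exists is dropped from the
                      # candidate set; whatever remains has no corresponding ebuild
                      # and is deleted from the cache.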
11669                 if dead_nodes:
11670                         for y in self._valid_pkgs:
11671                                 for mytree in portdb.porttrees:
11672                                         if portdb.findname2(y, mytree=mytree)[0]:
11673                                                 dead_nodes[mytree].discard(y)
11674
11675                         for mytree, nodes in dead_nodes.iteritems():
11676                                 auxdb = portdb.auxdb[mytree]
11677                                 for y in nodes:
11678                                         try:
11679                                                 del auxdb[y]
11680                                         except (KeyError, CacheError):
11681                                                 pass
11682
11683         def _schedule_tasks(self):
11684                 """
11685                 @rtype: bool
11686                 @returns: True if there may be remaining tasks to schedule,
11687                         False otherwise.
11688                 """
11689                 while self._can_add_job():
11690                         try:
11691                                 metadata_process = self._process_iter.next()
11692                         except StopIteration:
11693                                 return False
11694
11695                         self._jobs += 1
11696                         metadata_process.scheduler = self._sched_iface
11697                         metadata_process.addExitListener(self._metadata_exit)
11698                         metadata_process.start()
11699                 return True
11700
11701         def _metadata_exit(self, metadata_process):
11702                 self._jobs -= 1
11703                 if metadata_process.returncode != os.EX_OK:
11704                         self.returncode = 1
11705                         self._error_count += 1
11706                         self._valid_pkgs.discard(metadata_process.cpv)
11707                         portage.writemsg("Error processing %s, continuing...\n" % \
11708                                 (metadata_process.cpv,))
11709                 self._schedule()
11710
11711 class UninstallFailure(portage.exception.PortageException):
11712         """
11713         An instance of this class is raised by unmerge() when
11714         an uninstallation fails.
11715         """
11716         status = 1
11717         def __init__(self, *pargs):
11718                 portage.exception.PortageException.__init__(self, pargs)
11719                 if pargs:
11720                         self.status = pargs[0]
11721
11722 def unmerge(root_config, myopts, unmerge_action,
11723         unmerge_files, ldpath_mtimes, autoclean=0,
11724         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11725         scheduler=None, writemsg_level=portage.util.writemsg_level):
11726
11727         quiet = "--quiet" in myopts
11728         settings = root_config.settings
11729         sets = root_config.sets
11730         vartree = root_config.trees["vartree"]
11731         candidate_catpkgs=[]
11732         global_unmerge=0
11733         xterm_titles = "notitles" not in settings.features
11734         out = portage.output.EOutput()
11735         pkg_cache = {}
11736         db_keys = list(vartree.dbapi._aux_cache_keys)
11737
11738         def _pkg(cpv):
11739                 pkg = pkg_cache.get(cpv)
11740                 if pkg is None:
11741                         pkg = Package(cpv=cpv, installed=True,
11742                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11743                                 root_config=root_config,
11744                                 type_name="installed")
11745                         pkg_cache[cpv] = pkg
11746                 return pkg
11747
11748         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11749         try:
11750                 # At least the parent needs to exist for the lock file.
11751                 portage.util.ensure_dirs(vdb_path)
11752         except portage.exception.PortageException:
11753                 pass
11754         vdb_lock = None
11755         try:
11756                 if os.access(vdb_path, os.W_OK):
11757                         vdb_lock = portage.locks.lockdir(vdb_path)
11758                 realsyslist = sets["system"].getAtoms()
11759                 syslist = []
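                      # Expand the system set into syslist. A virtual only contributes
                      # when exactly one installed provider satisfies it; non-virtual
                      # atoms contribute their category/package name directly.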
11760                 for x in realsyslist:
11761                         mycp = portage.dep_getkey(x)
11762                         if mycp in settings.getvirtuals():
11763                                 providers = []
11764                                 for provider in settings.getvirtuals()[mycp]:
11765                                         if vartree.dbapi.match(provider):
11766                                                 providers.append(provider)
11767                                 if len(providers) == 1:
11768                                         syslist.extend(providers)
11769                         else:
11770                                 syslist.append(mycp)
11771         
11772                 mysettings = portage.config(clone=settings)
11773         
11774                 if not unmerge_files:
11775                         if unmerge_action == "unmerge":
11776                                 print
11777                                 print bold("emerge unmerge") + " can only be used with specific package names"
11778                                 print
11779                                 return 0
11780                         else:
11781                                 global_unmerge = 1
11782         
11783                 localtree = vartree
11784                 # process all arguments and add all
11785                 # valid db entries to candidate_catpkgs
11786                 if global_unmerge:
11787                         if not unmerge_files:
11788                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11789                 else:
11790                         #we've got command-line arguments
11791                         if not unmerge_files:
11792                                 print "\nNo packages to unmerge have been provided.\n"
11793                                 return 0
11794                         for x in unmerge_files:
11795                                 arg_parts = x.split('/')
11796                                 if x[0] not in [".","/"] and \
11797                                         arg_parts[-1][-7:] != ".ebuild":
11798                                         #possible cat/pkg or dep; treat as such
11799                                         candidate_catpkgs.append(x)
11800                                 elif unmerge_action in ["prune","clean"]:
11801                                         print "\n!!! Prune and clean do not accept individual" + \
11802                                                 " ebuilds as arguments;\n    skipping.\n"
11803                                         continue
11804                                 else:
11805                                         # it appears that the user is specifying an installed
11806                                         # ebuild and we're in "unmerge" mode, so it's ok.
11807                                         if not os.path.exists(x):
11808                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11809                                                 return 0
11810         
11811                                         absx   = os.path.abspath(x)
11812                                         sp_absx = absx.split("/")
11813                                         if sp_absx[-1][-7:] == ".ebuild":
11814                                                 del sp_absx[-1]
11815                                                 absx = "/".join(sp_absx)
11816         
11817                                         sp_absx_len = len(sp_absx)
11818         
11819                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11820                                         vdb_len  = len(vdb_path)
11821         
11822                                         sp_vdb     = vdb_path.split("/")
11823                                         sp_vdb_len = len(sp_vdb)
11824         
11825                                         if not os.path.exists(absx+"/CONTENTS"):
11826                                                 print "!!! Not a valid db dir: "+str(absx)
11827                                                 return 0
11828         
11829                                         if sp_absx_len <= sp_vdb_len:
11830                                                 # The path is shorter, so it can't be inside the vdb.
11831                                                 print sp_absx
11832                                                 print absx
11833                                                 print "\n!!!",x,"cannot be inside "+ \
11834                                                         vdb_path+"; aborting.\n"
11835                                                 return 0
11836         
11837                                         for idx in range(0,sp_vdb_len):
11838                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11839                                                         print sp_absx
11840                                                         print absx
11841                                                         print "\n!!!", x, "is not inside "+\
11842                                                                 vdb_path+"; aborting.\n"
11843                                                         return 0
11844         
11845                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11846                                         candidate_catpkgs.append(
11847                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11848         
11849                 newline=""
11850                 if (not "--quiet" in myopts):
11851                         newline="\n"
11852                 if settings["ROOT"] != "/":
11853                         writemsg_level(darkgreen(newline+ \
11854                                 ">>> Using system located in ROOT tree %s\n" % \
11855                                 settings["ROOT"]))
11856
11857                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11858                         not ("--quiet" in myopts):
11859                         writemsg_level(darkgreen(newline+\
11860                                 ">>> These are the packages that would be unmerged:\n"))
11861
11862                 # Preservation of order is required for --depclean and --prune so
11863                 # that dependencies are respected. Use all_selected to eliminate
11864                 # duplicate packages since the same package may be selected by
11865                 # multiple atoms.
11866                 pkgmap = []
11867                 all_selected = set()
11868                 for x in candidate_catpkgs:
11869                         # cycle through all our candidate deps and determine
11870                         # what will and will not get unmerged
11871                         try:
11872                                 mymatch = vartree.dbapi.match(x)
11873                         except portage.exception.AmbiguousPackageName, errpkgs:
11874                                 print "\n\n!!! The short ebuild name \"" + \
11875                                         x + "\" is ambiguous.  Please specify"
11876                                 print "!!! one of the following fully-qualified " + \
11877                                         "ebuild names instead:\n"
11878                                 for i in errpkgs[0]:
11879                                         print "    " + green(i)
11880                                 print
11881                                 sys.exit(1)
11882         
11883                         if not mymatch and x[0] not in "<>=~":
11884                                 mymatch = localtree.dep_match(x)
11885                         if not mymatch:
11886                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11887                                         (x, unmerge_action), noiselevel=-1)
11888                                 continue
11889
11890                         pkgmap.append(
11891                                 {"protected": set(), "selected": set(), "omitted": set()})
11892                         mykey = len(pkgmap) - 1
11893                         if unmerge_action == "unmerge":
11894                                 for y in mymatch:
11895                                         if y not in all_selected:
11896                                                 pkgmap[mykey]["selected"].add(y)
11897                                                 all_selected.add(y)
11898                         elif unmerge_action == "prune":
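                                      # Prune: protect the best installed version (preferring
                                      # the highest counter within a slot, otherwise the
                                      # highest version) and select every other matched
                                      # version for removal.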
11899                                 if len(mymatch) == 1:
11900                                         continue
11901                                 best_version = mymatch[0]
11902                                 best_slot = vartree.getslot(best_version)
11903                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11904                                 for mypkg in mymatch[1:]:
11905                                         myslot = vartree.getslot(mypkg)
11906                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11907                                         if (myslot == best_slot and mycounter > best_counter) or \
11908                                                 mypkg == portage.best([mypkg, best_version]):
11909                                                 if myslot == best_slot:
11910                                                         if mycounter < best_counter:
11911                                                                 # On slot collision, keep the one with the
11912                                                                 # highest counter since it is the most
11913                                                                 # recently installed.
11914                                                                 continue
11915                                                 best_version = mypkg
11916                                                 best_slot = myslot
11917                                                 best_counter = mycounter
11918                                 pkgmap[mykey]["protected"].add(best_version)
11919                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11920                                         if mypkg != best_version and mypkg not in all_selected)
11921                                 all_selected.update(pkgmap[mykey]["selected"])
11922                         else:
11923                                 # unmerge_action == "clean"
11924                                 slotmap={}
11925                                 for mypkg in mymatch:
11926                                         if unmerge_action == "clean":
11927                                                 myslot = localtree.getslot(mypkg)
11928                                         else:
11929                                                 # since we're pruning, we don't care about slots
11930                                                 # and put all the pkgs in together
11931                                                 myslot = 0
11932                                         if myslot not in slotmap:
11933                                                 slotmap[myslot] = {}
11934                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11935
11936                                 for mypkg in vartree.dbapi.cp_list(
11937                                         portage.dep_getkey(mymatch[0])):
11938                                         myslot = vartree.getslot(mypkg)
11939                                         if myslot not in slotmap:
11940                                                 slotmap[myslot] = {}
11941                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11942
11943                                 for myslot in slotmap:
11944                                         counterkeys = slotmap[myslot].keys()
11945                                         if not counterkeys:
11946                                                 continue
11947                                         counterkeys.sort()
11948                                         pkgmap[mykey]["protected"].add(
11949                                                 slotmap[myslot][counterkeys[-1]])
11950                                         del counterkeys[-1]
11951
11952                                         for counter in counterkeys[:]:
11953                                                 mypkg = slotmap[myslot][counter]
11954                                                 if mypkg not in mymatch:
11955                                                         counterkeys.remove(counter)
11956                                                         pkgmap[mykey]["protected"].add(
11957                                                                 slotmap[myslot][counter])
11958
11959                                         #be pretty and get them in order of merge:
11960                                         for ckey in counterkeys:
11961                                                 mypkg = slotmap[myslot][ckey]
11962                                                 if mypkg not in all_selected:
11963                                                         pkgmap[mykey]["selected"].add(mypkg)
11964                                                         all_selected.add(mypkg)
11965                                         # ok, now the last-merged package
11966                                         # is protected, and the rest are selected
11967                 numselected = len(all_selected)
11968                 if global_unmerge and not numselected:
11969                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11970                         return 0
11971         
11972                 if not numselected:
11973                         portage.writemsg_stdout(
11974                                 "\n>>> No packages selected for removal by " + \
11975                                 unmerge_action + "\n")
11976                         return 0
11977         finally:
11978                 if vdb_lock:
11979                         vartree.dbapi.flush_cache()
11980                         portage.locks.unlockdir(vdb_lock)
11981         
11982         from portage.sets.base import EditablePackageSet
11983         
11984         # generate a list of package sets that are directly or indirectly listed in "world",
11985         # as there is no persistent list of "installed" sets
11986         installed_sets = ["world"]
11987         stop = False
11988         pos = 0
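              # Expand nested set references breadth-first: keep scanning the newly
              # added sets for @set entries until a pass adds nothing new.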
11989         while not stop:
11990                 stop = True
11991                 pos = len(installed_sets)
11992                 for s in installed_sets[pos - 1:]:
11993                         if s not in sets:
11994                                 continue
11995                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11996                         if candidates:
11997                                 stop = False
11998                                 installed_sets += candidates
11999         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12000         del stop, pos
12001
12002         # we don't want to unmerge packages that are still listed in user-editable package sets
12003         # listed in "world" as they would be remerged on the next update of "world" or the 
12004         # relevant package sets.
12005         unknown_sets = set()
12006         for cp in xrange(len(pkgmap)):
12007                 for cpv in pkgmap[cp]["selected"].copy():
12008                         try:
12009                                 pkg = _pkg(cpv)
12010                         except KeyError:
12011                                 # It could have been uninstalled
12012                                 # by a concurrent process.
12013                                 continue
12014
12015                         if unmerge_action != "clean" and \
12016                                 root_config.root == "/" and \
12017                                 portage.match_from_list(
12018                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12019                                 msg = ("Not unmerging package %s since there is no valid " + \
12020                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12021                                 for line in textwrap.wrap(msg, 75):
12022                                         out.eerror(line)
12023                                 # adjust pkgmap so the display output is correct
12024                                 pkgmap[cp]["selected"].remove(cpv)
12025                                 all_selected.remove(cpv)
12026                                 pkgmap[cp]["protected"].add(cpv)
12027                                 continue
12028
12029                         parents = []
12030                         for s in installed_sets:
12031                                 # skip sets that the user requested to unmerge, and skip world 
12032                                 # unless we're unmerging a package set (as the package would be 
12033                                 # removed from "world" later on)
12034                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12035                                         continue
12036
12037                                 if s not in sets:
12038                                         if s in unknown_sets:
12039                                                 continue
12040                                         unknown_sets.add(s)
12041                                         out = portage.output.EOutput()
12042                                         out.eerror(("Unknown set '@%s' in " + \
12043                                                 "%svar/lib/portage/world_sets") % \
12044                                                 (s, root_config.root))
12045                                         continue
12046
12047                                 # only check instances of EditablePackageSet as other classes are generally used for
12048                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12049                                 # user can't do much about them anyway)
12050                                 if isinstance(sets[s], EditablePackageSet):
12051
12052                                         # This is derived from a snippet of code in the
12053                                         # depgraph._iter_atoms_for_pkg() method.
12054                                         for atom in sets[s].iterAtomsForPackage(pkg):
12055                                                 inst_matches = vartree.dbapi.match(atom)
12056                                                 inst_matches.reverse() # descending order
12057                                                 higher_slot = None
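                                                      # The set still counts as a parent of pkg
                                                      # unless a higher-versioned installed package
                                                      # in a different slot also satisfies the atom,
                                                      # in which case removing pkg leaves the set
                                                      # satisfied.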
12058                                                 for inst_cpv in inst_matches:
12059                                                         try:
12060                                                                 inst_pkg = _pkg(inst_cpv)
12061                                                         except KeyError:
12062                                                                 # It could have been uninstalled
12063                                                                 # by a concurrent process.
12064                                                                 continue
12065
12066                                                         if inst_pkg.cp != atom.cp:
12067                                                                 continue
12068                                                         if pkg >= inst_pkg:
12069                                                                 # This is descending order, and we're not
12070                                                                 # interested in any versions <= pkg given.
12071                                                                 break
12072                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12073                                                                 higher_slot = inst_pkg
12074                                                                 break
12075                                                 if higher_slot is None:
12076                                                         parents.append(s)
12077                                                         break
12078                         if parents:
12079                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12080                                 #print colorize("WARN", "but still listed in the following package sets:")
12081                                 #print "    %s\n" % ", ".join(parents)
12082                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12083                                 print colorize("WARN", "still referenced by the following package sets:")
12084                                 print "    %s\n" % ", ".join(parents)
12085                                 # adjust pkgmap so the display output is correct
12086                                 pkgmap[cp]["selected"].remove(cpv)
12087                                 all_selected.remove(cpv)
12088                                 pkgmap[cp]["protected"].add(cpv)
12089         
12090         del installed_sets
12091
12092         numselected = len(all_selected)
12093         if not numselected:
12094                 writemsg_level(
12095                         "\n>>> No packages selected for removal by " + \
12096                         unmerge_action + "\n")
12097                 return 0
12098
12099         # Unmerge order only matters in some cases
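              # (--depclean and --prune rely on it). When order does not matter,
              # regroup the entries by category/package name so that all versions
              # of the same package are listed and handled together.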
12100         if not ordered:
12101                 unordered = {}
12102                 for d in pkgmap:
12103                         selected = d["selected"]
12104                         if not selected:
12105                                 continue
12106                         cp = portage.cpv_getkey(iter(selected).next())
12107                         cp_dict = unordered.get(cp)
12108                         if cp_dict is None:
12109                                 cp_dict = {}
12110                                 unordered[cp] = cp_dict
12111                                 for k in d:
12112                                         cp_dict[k] = set()
12113                         for k, v in d.iteritems():
12114                                 cp_dict[k].update(v)
12115                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12116
12117         for x in xrange(len(pkgmap)):
12118                 selected = pkgmap[x]["selected"]
12119                 if not selected:
12120                         continue
12121                 for mytype, mylist in pkgmap[x].iteritems():
12122                         if mytype == "selected":
12123                                 continue
12124                         mylist.difference_update(all_selected)
12125                 cp = portage.cpv_getkey(iter(selected).next())
12126                 for y in localtree.dep_match(cp):
12127                         if y not in pkgmap[x]["omitted"] and \
12128                                 y not in pkgmap[x]["selected"] and \
12129                                 y not in pkgmap[x]["protected"] and \
12130                                 y not in all_selected:
12131                                 pkgmap[x]["omitted"].add(y)
12132                 if global_unmerge and not pkgmap[x]["selected"]:
12133                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12134                         continue
12135                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12136                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12137                                 "'%s' is part of your system profile.\n" % cp),
12138                                 level=logging.WARNING, noiselevel=-1)
12139                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12140                                 "be damaging to your system.\n\n"),
12141                                 level=logging.WARNING, noiselevel=-1)
12142                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12143                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12144                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12145                 if not quiet:
12146                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12147                 else:
12148                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12149                 for mytype in ["selected","protected","omitted"]:
12150                         if not quiet:
12151                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12152                         if pkgmap[x][mytype]:
12153                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12154                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12155                                 for pn, ver, rev in sorted_pkgs:
12156                                         if rev == "r0":
12157                                                 myversion = ver
12158                                         else:
12159                                                 myversion = ver + "-" + rev
12160                                         if mytype == "selected":
12161                                                 writemsg_level(
12162                                                         colorize("UNMERGE_WARN", myversion + " "),
12163                                                         noiselevel=-1)
12164                                         else:
12165                                                 writemsg_level(
12166                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12167                         else:
12168                                 writemsg_level("none ", noiselevel=-1)
12169                         if not quiet:
12170                                 writemsg_level("\n", noiselevel=-1)
12171                 if quiet:
12172                         writemsg_level("\n", noiselevel=-1)
12173
12174         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12175                 " packages are slated for removal.\n")
12176         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12177                         " and " + colorize("GOOD", "'omitted'") + \
12178                         " packages will not be removed.\n\n")
12179
12180         if "--pretend" in myopts:
12181                 #we're done... return
12182                 return 0
12183         if "--ask" in myopts:
12184                 if userquery("Would you like to unmerge these packages?")=="No":
12185                         # enter pretend mode for correct formatting of results
12186                         myopts["--pretend"] = True
12187                         print
12188                         print "Quitting."
12189                         print
12190                         return 0
12191         #the real unmerging begins, after a short delay....
12192         if clean_delay and not autoclean:
12193                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12194
12195         for x in xrange(len(pkgmap)):
12196                 for y in pkgmap[x]["selected"]:
12197                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12198                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12199                         mysplit = y.split("/")
12200                         #unmerge...
12201                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12202                                 mysettings, unmerge_action not in ["clean","prune"],
12203                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12204                                 scheduler=scheduler)
12205
12206                         if retval != os.EX_OK:
12207                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12208                                 if raise_on_error:
12209                                         raise UninstallFailure(retval)
12210                                 sys.exit(retval)
12211                         else:
12212                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12213                                         sets["world"].cleanPackage(vartree.dbapi, y)
12214                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12215         if clean_world and hasattr(sets["world"], "remove"):
12216                 for s in root_config.setconfig.active:
12217                         sets["world"].remove(SETPREFIX+s)
12218         return 1
12219
12220 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12221
12222         if os.path.exists("/usr/bin/install-info"):
12223                 out = portage.output.EOutput()
12224                 regen_infodirs=[]
12225                 for z in infodirs:
12226                         if z=='':
12227                                 continue
12228                         inforoot=normpath(root+z)
12229                         if os.path.isdir(inforoot):
12230                                 infomtime = long(os.stat(inforoot).st_mtime)
12231                                 if inforoot not in prev_mtimes or \
12232                                         prev_mtimes[inforoot] != infomtime:
12233                                                 regen_infodirs.append(inforoot)
12234
12235                 if not regen_infodirs:
12236                         portage.writemsg_stdout("\n")
12237                         out.einfo("GNU info directory index is up-to-date.")
12238                 else:
12239                         portage.writemsg_stdout("\n")
12240                         out.einfo("Regenerating GNU info directory index...")
12241
12242                         dir_extensions = ("", ".gz", ".bz2")
12243                         icount=0
12244                         badcount=0
12245                         errmsg = ""
12246                         for inforoot in regen_infodirs:
12247                                 if inforoot=='':
12248                                         continue
12249
12250                                 if not os.path.isdir(inforoot) or \
12251                                         not os.access(inforoot, os.W_OK):
12252                                         continue
12253
12254                                 file_list = os.listdir(inforoot)
12255                                 file_list.sort()
12256                                 dir_file = os.path.join(inforoot, "dir")
12257                                 moved_old_dir = False
12258                                 processed_count = 0
12259                                 for x in file_list:
12260                                         if x.startswith(".") or \
12261                                                 os.path.isdir(os.path.join(inforoot, x)):
12262                                                 continue
12263                                         if x.startswith("dir"):
12264                                                 skip = False
12265                                                 for ext in dir_extensions:
12266                                                         if x == "dir" + ext or \
12267                                                                 x == "dir" + ext + ".old":
12268                                                                 skip = True
12269                                                                 break
12270                                                 if skip:
12271                                                         continue
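                                              # Before installing the first entry, move any existing
                                              # dir index (and its compressed variants) out of the
                                              # way so that install-info regenerates it; it is put
                                              # back below if no new dir file gets generated.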
12272                                         if processed_count == 0:
12273                                                 for ext in dir_extensions:
12274                                                         try:
12275                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12276                                                                 moved_old_dir = True
12277                                                         except EnvironmentError, e:
12278                                                                 if e.errno != errno.ENOENT:
12279                                                                         raise
12280                                                                 del e
12281                                         processed_count += 1
12282                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12283                                         existsstr="already exists, for file `"
12284                                         if myso!="":
12285                                                 if re.search(existsstr,myso):
12286                                                         # Already exists... Don't increment the count for this.
12287                                                         pass
12288                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12289                                                         # This info file doesn't contain a DIR-header: install-info produces this
12290                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12291                                                         # Don't increment the count for this.
12292                                                         pass
12293                                                 else:
12294                                                         badcount=badcount+1
12295                                                         errmsg += myso + "\n"
12296                                         icount=icount+1
12297
12298                                 if moved_old_dir and not os.path.exists(dir_file):
12299                                         # We didn't generate a new dir file, so put the old file
12300                                         # back where it was originally found.
12301                                         for ext in dir_extensions:
12302                                                 try:
12303                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12304                                                 except EnvironmentError, e:
12305                                                         if e.errno != errno.ENOENT:
12306                                                                 raise
12307                                                         del e
12308
12309                                 # Clean up dir.old cruft so that it doesn't prevent
12310                                 # unmerge of otherwise empty directories.
12311                                 for ext in dir_extensions:
12312                                         try:
12313                                                 os.unlink(dir_file + ext + ".old")
12314                                         except EnvironmentError, e:
12315                                                 if e.errno != errno.ENOENT:
12316                                                         raise
12317                                                 del e
12318
12319                                 # Update the mtime so that we can potentially avoid regenerating.
12320                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12321
12322                         if badcount:
12323                                 out.eerror("Processed %d info files; %d errors." % \
12324                                         (icount, badcount))
12325                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12326                         else:
12327                                 if icount > 0:
12328                                         out.einfo("Processed %d info files." % (icount,))
12329
12330
12331 def display_news_notification(root_config, myopts):
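        """
        Summary (added, based on the body below): check every repository known
        to portdb for unread news items and, for each repository that has any,
        print a warning with the unread count and a reminder to use
        'eselect news' to read them.
        """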
12332         target_root = root_config.root
12333         trees = root_config.trees
12334         settings = trees["vartree"].settings
12335         portdb = trees["porttree"].dbapi
12336         vardb = trees["vartree"].dbapi
12337         NEWS_PATH = os.path.join("metadata", "news")
12338         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12339         newsReaderDisplay = False
12340         update = "--pretend" not in myopts
12341
12342         for repo in portdb.getRepositories():
12343                 unreadItems = checkUpdatedNewsItems(
12344                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12345                 if unreadItems:
12346                         if not newsReaderDisplay:
12347                                 newsReaderDisplay = True
12348                                 print
12349                         print colorize("WARN", " * IMPORTANT:"),
12350                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12351                         
12352         
12353         if newsReaderDisplay:
12354                 print colorize("WARN", " *"),
12355                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12356                 print
12357
12358 def display_preserved_libs(vardbapi):
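        """
        Summary (added, based on the body below): print a report of the
        preserved libraries recorded in the vardbapi preserved-libs registry,
        grouped by the owning package, together with up to MAX_DISPLAY
        consumers of each library and a hint to run 'emerge @preserved-rebuild'.
        """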
12359         MAX_DISPLAY = 3
12360
12361         # Ensure the registry is consistent with existing files.
12362         vardbapi.plib_registry.pruneNonExisting()
12363
12364         if vardbapi.plib_registry.hasEntries():
12365                 print
12366                 print colorize("WARN", "!!!") + " existing preserved libs:"
12367                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12368                 linkmap = vardbapi.linkmap
12369                 consumer_map = {}
12370                 owners = {}
12371                 linkmap_broken = False
12372
12373                 try:
12374                         linkmap.rebuild()
12375                 except portage.exception.CommandNotFound, e:
12376                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12377                                 level=logging.ERROR, noiselevel=-1)
12378                         del e
12379                         linkmap_broken = True
12380                 else:
12381                         search_for_owners = set()
12382                         for cpv in plibdata:
12383                                 internal_plib_keys = set(linkmap._obj_key(f) \
12384                                         for f in plibdata[cpv])
12385                                 for f in plibdata[cpv]:
12386                                         if f in consumer_map:
12387                                                 continue
12388                                         consumers = []
12389                                         for c in linkmap.findConsumers(f):
12390                                                 # Filter out any consumers that are also preserved libs
12391                                                 # belonging to the same package as the provider.
12392                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12393                                                         consumers.append(c)
12394                                         consumers.sort()
12395                                         consumer_map[f] = consumers
12396                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12397
12398                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12399
12400                 for cpv in plibdata:
12401                         print colorize("WARN", ">>>") + " package: %s" % cpv
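                        # Group alternate paths that refer to the same underlying object
                        # (e.g. hard links or symlinks that resolve to the same file) so
                        # that each preserved lib is listed only once with all of its paths.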
12402                         samefile_map = {}
12403                         for f in plibdata[cpv]:
12404                                 obj_key = linkmap._obj_key(f)
12405                                 alt_paths = samefile_map.get(obj_key)
12406                                 if alt_paths is None:
12407                                         alt_paths = set()
12408                                         samefile_map[obj_key] = alt_paths
12409                                 alt_paths.add(f)
12410
12411                         for alt_paths in samefile_map.itervalues():
12412                                 alt_paths = sorted(alt_paths)
12413                                 for p in alt_paths:
12414                                         print colorize("WARN", " * ") + " - %s" % (p,)
12415                                 f = alt_paths[0]
12416                                 consumers = consumer_map.get(f, [])
12417                                 for c in consumers[:MAX_DISPLAY]:
12418                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12419                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12420                                 if len(consumers) == MAX_DISPLAY + 1:
12421                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12422                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12423                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12424                                 elif len(consumers) > MAX_DISPLAY:
12425                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12426                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12427
12428
12429 def _flush_elog_mod_echo():
12430         """
12431         Dump the mod_echo output now so that our other
12432         notifications are shown last.
12433         @rtype: bool
12434         @returns: True if messages were shown, False otherwise.
12435         """
12436         messages_shown = False
12437         try:
12438                 from portage.elog import mod_echo
12439         except ImportError:
12440                 pass # happens during downgrade to a version without the module
12441         else:
12442                 messages_shown = bool(mod_echo._items)
12443                 mod_echo.finalize()
12444         return messages_shown
12445
12446 def post_emerge(root_config, myopts, mtimedb, retval):
12447         """
12448         Misc. things to run at the end of a merge session.
12449         
12450         Update Info Files
12451         Update Config Files
12452         Update News Items
12453         Commit mtimeDB
12454         Display preserved libs warnings
12455         Exit Emerge
12456
12457         @param root_config: RootConfig instance for the target ROOT, providing its package databases
12458         @type root_config: RootConfig
12459         @param mtimedb: The mtimeDB to store data needed across merge invocations
12460         @type mtimedb: MtimeDB class instance
12461         @param retval: Emerge's return value
12462         @type retval: Int
12463         @rtype: None
12464         @returns:
12465         1.  Calls sys.exit(retval)
12466         """
12467
12468         target_root = root_config.root
12469         trees = { target_root : root_config.trees }
12470         vardbapi = trees[target_root]["vartree"].dbapi
12471         settings = vardbapi.settings
12472         info_mtimes = mtimedb["info"]
12473
12474         # Load the most current variables from ${ROOT}/etc/profile.env
12475         settings.unlock()
12476         settings.reload()
12477         settings.regenerate()
12478         settings.lock()
12479
12480         config_protect = settings.get("CONFIG_PROTECT","").split()
12481         infodirs = settings.get("INFOPATH","").split(":") + \
12482                 settings.get("INFODIR","").split(":")
12483
12484         os.chdir("/")
12485
12486         if retval == os.EX_OK:
12487                 exit_msg = " *** exiting successfully."
12488         else:
12489                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12490         emergelog("notitles" not in settings.features, exit_msg)
12491
12492         _flush_elog_mod_echo()
12493
12494         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12495         if "--pretend" in myopts or (counter_hash is not None and \
12496                 counter_hash == vardbapi._counter_hash()):
12497                 display_news_notification(root_config, myopts)
12498                 # If vdb state has not changed then there's nothing else to do.
12499                 sys.exit(retval)
12500
12501         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12502         portage.util.ensure_dirs(vdb_path)
12503         vdb_lock = None
12504         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12505                 vdb_lock = portage.locks.lockdir(vdb_path)
12506
12507         if vdb_lock:
12508                 try:
12509                         if "noinfo" not in settings.features:
12510                                 chk_updated_info_files(target_root,
12511                                         infodirs, info_mtimes, retval)
12512                         mtimedb.commit()
12513                 finally:
12514                         if vdb_lock:
12515                                 portage.locks.unlockdir(vdb_lock)
12516
12517         chk_updated_cfg_files(target_root, config_protect)
12518         
12519         display_news_notification(root_config, myopts)
12520         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12521                 display_preserved_libs(vardbapi)        
12522
12523         sys.exit(retval)
12524
12525
12526 def chk_updated_cfg_files(target_root, config_protect):
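        """
        Summary (added, based on the body below): scan each CONFIG_PROTECT
        entry under target_root for pending configuration file updates
        (._cfg????_* files, e.g. ._cfg0000_foo.conf) and print a summary
        pointing the user to the CONFIGURATION FILES section of the emerge
        man page.
        """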
12527         if config_protect:
12528                 # Number of directories with some protect files in them
12529                 procount=0
12530                 for x in config_protect:
12531                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12532                         if not os.access(x, os.W_OK):
12533                                 # Avoid Permission denied errors generated
12534                                 # later by `find`.
12535                                 continue
12536                         try:
12537                                 mymode = os.lstat(x).st_mode
12538                         except OSError:
12539                                 continue
12540                         if stat.S_ISLNK(mymode):
12541                                 # We want to treat it like a directory if it
12542                                 # is a symlink to an existing directory.
12543                                 try:
12544                                         real_mode = os.stat(x).st_mode
12545                                         if stat.S_ISDIR(real_mode):
12546                                                 mymode = real_mode
12547                                 except OSError:
12548                                         pass
12549                         if stat.S_ISDIR(mymode):
12550                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12551                         else:
12552                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12553                                         os.path.split(x.rstrip(os.path.sep))
12554                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12555                         a = commands.getstatusoutput(mycommand)
12556                         if a[0] != 0:
12557                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12558                                 sys.stderr.flush()
12559                                 # Show the error message alone, sending stdout to /dev/null.
12560                                 os.system(mycommand + " 1>/dev/null")
12561                         else:
12562                                 files = a[1].split('\0')
12563                                 # split always produces an empty string as the last element
12564                                 if files and not files[-1]:
12565                                         del files[-1]
12566                                 if files:
12567                                         procount += 1
12568                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12569                                         if stat.S_ISDIR(mymode):
12570                                                  print "%d config files in '%s' need updating." % \
12571                                                         (len(files), x)
12572                                         else:
12573                                                  print "config file '%s' needs updating." % x
12574
12575                 if procount:
12576                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12577                                 " section of the " + bold("emerge")
12578                         print " "+yellow("*")+" man page to learn how to update config files."
12579
12580 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12581         update=False):
12582         """
12583         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12584         Returns the number of unread (yet relevant) items.
12585
12586         @param portdb: a portage tree database
12587         @type portdb: portdbapi
12588         @param vardb: an installed package database
12589         @type vardb: vardbapi
12590         @param NEWS_PATH: path to the news directory, relative to the repository root
12591         @type NEWS_PATH: str
12592         @param UNREAD_PATH: path to the directory where unread news item lists are stored
12593         @type UNREAD_PATH: str
12594         @param repo_id: name of the repository to check for news items
12595         @type repo_id: str
12596         @rtype: Integer
12597         @returns:
12598         1.  The number of unread but relevant news items.
12599         
12600         """
12601         from portage.news import NewsManager
12602         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12603         return manager.getUnreadItems( repo_id, update=update )
12604
12605 def insert_category_into_atom(atom, category):
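        """
        Insert the given category in front of the package name portion of an
        atom; e.g. insert_category_into_atom(">=foo-1.0", "app-misc") would
        return ">=app-misc/foo-1.0" (illustrative input). Returns None if the
        atom contains no word character to anchor on.
        """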
12606         alphanum = re.search(r'\w', atom)
12607         if alphanum:
12608                 ret = atom[:alphanum.start()] + "%s/" % category + \
12609                         atom[alphanum.start():]
12610         else:
12611                 ret = None
12612         return ret
12613
12614 def is_valid_package_atom(x):
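        """
        Return a true value if x is a valid package atom. Category-less atoms
        are validated by temporarily inserting a dummy 'cat/' category, so
        e.g. is_valid_package_atom(">=foo-1.0") checks ">=cat/foo-1.0".
        """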
12615         if "/" not in x:
12616                 alphanum = re.search(r'\w', x)
12617                 if alphanum:
12618                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12619         return portage.isvalidatom(x)
12620
12621 def show_blocker_docs_link():
12622         print
12623         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12624         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12625         print
12626         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12627         print
12628
12629 def show_mask_docs():
12630         print "For more information, see the MASKED PACKAGES section in the emerge"
12631         print "man page or refer to the Gentoo Handbook."
12632
12633 def action_sync(settings, trees, mtimedb, myopts, myaction):
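        """
        Summary (added, based on the body below): handle 'emerge --sync' (and
        'emerge --metadata'): update the tree in PORTDIR via git, rsync, or cvs
        depending on SYNC and the contents of PORTDIR, then (where enabled)
        transfer the metadata cache, apply any global updates, and display
        pending news items.
        """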
12634         xterm_titles = "notitles" not in settings.features
12635         emergelog(xterm_titles, " === sync")
12636         myportdir = settings.get("PORTDIR", None)
12637         out = portage.output.EOutput()
12638         if not myportdir:
12639                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12640                 sys.exit(1)
12641         if myportdir[-1]=="/":
12642                 myportdir=myportdir[:-1]
12643         try:
12644                 st = os.stat(myportdir)
12645         except OSError:
12646                 st = None
12647         if st is None:
12648                 print ">>>",myportdir,"not found, creating it."
12649                 os.makedirs(myportdir,0755)
12650                 st = os.stat(myportdir)
12651
12652         spawn_kwargs = {}
12653         spawn_kwargs["env"] = settings.environ()
12654         if 'usersync' in settings.features and \
12655                 portage.data.secpass >= 2 and \
12656                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12657                 st.st_gid != os.getgid() and st.st_mode & 0070):
12658                 try:
12659                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12660                 except KeyError:
12661                         pass
12662                 else:
12663                         # Drop privileges when syncing, in order to match
12664                         # existing uid/gid settings.
12665                         spawn_kwargs["uid"]    = st.st_uid
12666                         spawn_kwargs["gid"]    = st.st_gid
12667                         spawn_kwargs["groups"] = [st.st_gid]
12668                         spawn_kwargs["env"]["HOME"] = homedir
12669                         umask = 0002
12670                         if not st.st_mode & 0020:
12671                                 umask = umask | 0020
12672                         spawn_kwargs["umask"] = umask
12673
12674         syncuri = settings.get("SYNC", "").strip()
12675         if not syncuri:
12676                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12677                         noiselevel=-1, level=logging.ERROR)
12678                 return 1
12679
12680         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12681         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12682
12683         os.umask(0022)
12684         dosyncuri = syncuri
12685         updatecache_flg = False
12686         if myaction == "metadata":
12687                 print "skipping sync"
12688                 updatecache_flg = True
12689         elif ".git" in vcs_dirs:
12690                 # Update existing git repository, and ignore the syncuri. We are
12691                 # going to trust the user and assume that the user is in the branch
12692                 # that he/she wants updated. We'll let the user manage branches with
12693                 # git directly.
12694                 if portage.process.find_binary("git") is None:
12695                         msg = ["Command not found: git",
12696                         "Type \"emerge dev-util/git\" to enable git support."]
12697                         for l in msg:
12698                                 writemsg_level("!!! %s\n" % l,
12699                                         level=logging.ERROR, noiselevel=-1)
12700                         return 1
12701                 msg = ">>> Starting git pull in %s..." % myportdir
12702                 emergelog(xterm_titles, msg )
12703                 writemsg_level(msg + "\n")
12704                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12705                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12706                 if exitcode != os.EX_OK:
12707                         msg = "!!! git pull error in %s." % myportdir
12708                         emergelog(xterm_titles, msg)
12709                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12710                         return exitcode
12711                 msg = ">>> Git pull in %s successful" % myportdir
12712                 emergelog(xterm_titles, msg)
12713                 writemsg_level(msg + "\n")
12714                 exitcode = git_sync_timestamps(settings, myportdir)
12715                 if exitcode == os.EX_OK:
12716                         updatecache_flg = True
12717         elif syncuri[:8]=="rsync://":
12718                 for vcs_dir in vcs_dirs:
12719                         writemsg_level(("!!! %s appears to be under revision " + \
12720                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12721                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12722                         return 1
12723                 if not os.path.exists("/usr/bin/rsync"):
12724                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12725                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12726                         sys.exit(1)
12727                 mytimeout=180
12728
12729                 rsync_opts = []
12730                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12731                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12732                         rsync_opts.extend([
12733                                 "--recursive",    # Recurse directories
12734                                 "--links",        # Consider symlinks
12735                                 "--safe-links",   # Ignore links outside of tree
12736                                 "--perms",        # Preserve permissions
12737                                 "--times",        # Preserve mod times
12738                                 "--compress",     # Compress the data transmitted
12739                                 "--force",        # Force deletion on non-empty dirs
12740                                 "--whole-file",   # Don't do block transfers, only entire files
12741                                 "--delete",       # Delete files that aren't in the master tree
12742                                 "--stats",        # Show final statistics about what was transferred
12743                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12744                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12745                                 "--exclude=/local",       # Exclude local     from consideration
12746                                 "--exclude=/packages",    # Exclude packages  from consideration
12747                         ])
12748
12749                 else:
12750                         # The below validation is not needed when using the above hardcoded
12751                         # defaults.
12752
12753                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12754                         rsync_opts.extend(
12755                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12756                         for opt in ("--recursive", "--times"):
12757                                 if opt not in rsync_opts:
12758                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12759                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12760                                         rsync_opts.append(opt)
12761         
12762                         for exclude in ("distfiles", "local", "packages"):
12763                                 opt = "--exclude=/%s" % exclude
12764                                 if opt not in rsync_opts:
12765                                         portage.writemsg(yellow("WARNING:") + \
12766                                         " adding required option %s not included in "  % opt + \
12767                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12768                                         rsync_opts.append(opt)
12769         
12770                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12771                                 def rsync_opt_startswith(opt_prefix):
12772                                         for x in rsync_opts:
12773                                                 if x.startswith(opt_prefix):
12774                                                         return True
12775                                         return False
12776
12777                                 if not rsync_opt_startswith("--timeout="):
12778                                         rsync_opts.append("--timeout=%d" % mytimeout)
12779
12780                                 for opt in ("--compress", "--whole-file"):
12781                                         if opt not in rsync_opts:
12782                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12783                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12784                                                 rsync_opts.append(opt)
12785
12786                 if "--quiet" in myopts:
12787                         rsync_opts.append("--quiet")    # Shut up a lot
12788                 else:
12789                         rsync_opts.append("--verbose")  # Print filelist
12790
12791                 if "--verbose" in myopts:
12792                         rsync_opts.append("--progress")  # Progress meter for each file
12793
12794                 if "--debug" in myopts:
12795                         rsync_opts.append("--checksum") # Force checksum on all files
12796
12797                 # Real local timestamp file.
12798                 servertimestampfile = os.path.join(
12799                         myportdir, "metadata", "timestamp.chk")
12800
12801                 content = portage.util.grabfile(servertimestampfile)
12802                 mytimestamp = 0
12803                 if content:
12804                         try:
12805                                 mytimestamp = time.mktime(time.strptime(content[0],
12806                                         "%a, %d %b %Y %H:%M:%S +0000"))
12807                         except (OverflowError, ValueError):
12808                                 pass
12809                 del content
12810
12811                 try:
12812                         rsync_initial_timeout = \
12813                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12814                 except ValueError:
12815                         rsync_initial_timeout = 15
12816
12817                 try:
12818                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12819                 except SystemExit, e:
12820                         raise # Needed else can't exit
12821                 except:
12822                         maxretries=3 #default number of retries
12823
12824                 retries=0
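                # Split the rsync URI into an optional user name, the host name,
                # and an optional port so that individual mirror IP addresses can
                # be substituted for the host name in the retry loop below.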
12825                 user_name, hostname, port = re.split(
12826                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12827                 if port is None:
12828                         port=""
12829                 if user_name is None:
12830                         user_name=""
12831                 updatecache_flg=True
12832                 all_rsync_opts = set(rsync_opts)
12833                 extra_rsync_opts = shlex.split(
12834                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12835                 all_rsync_opts.update(extra_rsync_opts)
12836                 family = socket.AF_INET
12837                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12838                         family = socket.AF_INET
12839                 elif socket.has_ipv6 and \
12840                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12841                         family = socket.AF_INET6
12842                 ips=[]
12843                 SERVER_OUT_OF_DATE = -1
12844                 EXCEEDED_MAX_RETRIES = -2
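                # Retry loop: rotate through the resolved mirror IPs, fetching the
                # server's metadata/timestamp.chk first as a probe and to avoid a
                # full transfer when the local tree is already up to date.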
12845                 while (1):
12846                         if ips:
12847                                 del ips[0]
12848                         if ips==[]:
12849                                 try:
12850                                         for addrinfo in socket.getaddrinfo(
12851                                                 hostname, None, family, socket.SOCK_STREAM):
12852                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12853                                                         # IPv6 addresses need to be enclosed in square brackets
12854                                                         ips.append("[%s]" % addrinfo[4][0])
12855                                                 else:
12856                                                         ips.append(addrinfo[4][0])
12857                                         from random import shuffle
12858                                         shuffle(ips)
12859                                 except SystemExit, e:
12860                                         raise # Needed else can't exit
12861                                 except Exception, e:
12862                                         print "Notice:",str(e)
12863                                         dosyncuri=syncuri
12864
12865                         if ips:
12866                                 try:
12867                                         dosyncuri = syncuri.replace(
12868                                                 "//" + user_name + hostname + port + "/",
12869                                                 "//" + user_name + ips[0] + port + "/", 1)
12870                                 except SystemExit, e:
12871                                         raise # Needed else can't exit
12872                                 except Exception, e:
12873                                         print "Notice:",str(e)
12874                                         dosyncuri=syncuri
12875
12876                         if (retries==0):
12877                                 if "--ask" in myopts:
12878                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12879                                                 print
12880                                                 print "Quitting."
12881                                                 print
12882                                                 sys.exit(0)
12883                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12884                                 if "--quiet" not in myopts:
12885                                         print ">>> Starting rsync with "+dosyncuri+"..."
12886                         else:
12887                                 emergelog(xterm_titles,
12888                                         ">>> Starting retry %d of %d with %s" % \
12889                                                 (retries,maxretries,dosyncuri))
12890                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12891
12892                         if mytimestamp != 0 and "--quiet" not in myopts:
12893                                 print ">>> Checking server timestamp ..."
12894
12895                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12896
12897                         if "--debug" in myopts:
12898                                 print rsynccommand
12899
12900                         exitcode = os.EX_OK
12901                         servertimestamp = 0
12902                         # Even if there's no timestamp available locally, fetch the
12903                         # timestamp anyway as an initial probe to verify that the server is
12904                         # responsive.  This protects us from hanging indefinitely on a
12905                         # connection attempt to an unresponsive server which rsync's
12906                         # --timeout option does not prevent.
12907                         if True:
12908                                 # Temporary file for remote server timestamp comparison.
12909                                 from tempfile import mkstemp
12910                                 fd, tmpservertimestampfile = mkstemp()
12911                                 os.close(fd)
12912                                 mycommand = rsynccommand[:]
12913                                 mycommand.append(dosyncuri.rstrip("/") + \
12914                                         "/metadata/timestamp.chk")
12915                                 mycommand.append(tmpservertimestampfile)
12916                                 content = None
12917                                 mypids = []
12918                                 try:
12919                                         def timeout_handler(signum, frame):
12920                                                 raise portage.exception.PortageException("timed out")
12921                                         signal.signal(signal.SIGALRM, timeout_handler)
12922                                         # Timeout here in case the server is unresponsive.  The
12923                                         # --timeout rsync option doesn't apply to the initial
12924                                         # connection attempt.
12925                                         if rsync_initial_timeout:
12926                                                 signal.alarm(rsync_initial_timeout)
12927                                         try:
12928                                                 mypids.extend(portage.process.spawn(
12929                                                         mycommand, env=settings.environ(), returnpid=True))
12930                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12931                                                 content = portage.grabfile(tmpservertimestampfile)
12932                                         finally:
12933                                                 if rsync_initial_timeout:
12934                                                         signal.alarm(0)
12935                                                 try:
12936                                                         os.unlink(tmpservertimestampfile)
12937                                                 except OSError:
12938                                                         pass
12939                                 except portage.exception.PortageException, e:
12940                                         # timed out
12941                                         print e
12942                                         del e
12943                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12944                                                 os.kill(mypids[0], signal.SIGTERM)
12945                                                 os.waitpid(mypids[0], 0)
12946                                         # This is the same code rsync uses for timeout.
12947                                         exitcode = 30
12948                                 else:
12949                                         if exitcode != os.EX_OK:
12950                                                 if exitcode & 0xff:
12951                                                         exitcode = (exitcode & 0xff) << 8
12952                                                 else:
12953                                                         exitcode = exitcode >> 8
12954                                 if mypids:
12955                                         portage.process.spawned_pids.remove(mypids[0])
12956                                 if content:
12957                                         try:
12958                                                 servertimestamp = time.mktime(time.strptime(
12959                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12960                                         except (OverflowError, ValueError):
12961                                                 pass
12962                                 del mycommand, mypids, content
12963                         if exitcode == os.EX_OK:
12964                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12965                                         emergelog(xterm_titles,
12966                                                 ">>> Cancelling sync -- Already current.")
12967                                         print
12968                                         print ">>>"
12969                                         print ">>> Timestamps on the server and in the local repository are the same."
12970                                         print ">>> Cancelling all further sync action. You are already up to date."
12971                                         print ">>>"
12972                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12973                                         print ">>>"
12974                                         print
12975                                         sys.exit(0)
12976                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12977                                         emergelog(xterm_titles,
12978                                                 ">>> Server out of date: %s" % dosyncuri)
12979                                         print
12980                                         print ">>>"
12981                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12982                                         print ">>>"
12983                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12984                                         print ">>>"
12985                                         print
12986                                         exitcode = SERVER_OUT_OF_DATE
12987                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12988                                         # actual sync
12989                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12990                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12991                                         if exitcode in [0,1,3,4,11,14,20,21]:
12992                                                 break
12993                         elif exitcode in [1,3,4,11,14,20,21]:
12994                                 break
12995                         else:
12996                                 # Code 2 indicates protocol incompatibility, which is expected
12997                                 # for servers with protocol < 29 that don't support
12998                                 # --prune-empty-directories.  Retry for a server that supports
12999                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13000                                 pass
13001
13002                         retries=retries+1
13003
13004                         if retries<=maxretries:
13005                                 print ">>> Retrying..."
13006                                 time.sleep(11)
13007                         else:
13008                                 # over retries
13009                                 # exit loop
13010                                 updatecache_flg=False
13011                                 exitcode = EXCEEDED_MAX_RETRIES
13012                                 break
13013
13014                 if (exitcode==0):
13015                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13016                 elif exitcode == SERVER_OUT_OF_DATE:
13017                         sys.exit(1)
13018                 elif exitcode == EXCEEDED_MAX_RETRIES:
13019                         sys.stderr.write(
13020                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13021                         sys.exit(1)
13022                 elif (exitcode>0):
13023                         msg = []
13024                         if exitcode==1:
13025                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13026                                 msg.append("that your SYNC statement is proper.")
13027                                 msg.append("SYNC=" + settings["SYNC"])
13028                         elif exitcode==11:
13029                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13030                                 msg.append("this means your disk is full, but can be caused by corruption")
13031                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13032                                 msg.append("and try again after the problem has been fixed.")
13033                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13034                         elif exitcode==20:
13035                                 msg.append("Rsync was killed before it finished.")
13036                         else:
13037                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13038                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13039                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13040                                 msg.append("temporary problem unless complications exist with your network")
13041                                 msg.append("(and possibly your system's filesystem) configuration.")
13042                         for line in msg:
13043                                 out.eerror(line)
13044                         sys.exit(exitcode)
13045         elif syncuri[:6]=="cvs://":
13046                 if not os.path.exists("/usr/bin/cvs"):
13047                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13048                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13049                         sys.exit(1)
13050                 cvsroot=syncuri[6:]
13051                 cvsdir=os.path.dirname(myportdir)
13052                 if not os.path.exists(myportdir+"/CVS"):
13053                         #initial checkout
13054                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13055                         if os.path.exists(cvsdir+"/gentoo-x86"):
13056                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13057                                 sys.exit(1)
13058                         try:
13059                                 os.rmdir(myportdir)
13060                         except OSError, e:
13061                                 if e.errno != errno.ENOENT:
13062                                         sys.stderr.write(
13063                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13064                                         sys.exit(1)
13065                                 del e
13066                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13067                                 print "!!! cvs checkout error; exiting."
13068                                 sys.exit(1)
13069                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13070                 else:
13071                         #cvs update
13072                         print ">>> Starting cvs update with "+syncuri+"..."
13073                         retval = portage.process.spawn_bash(
13074                                 "cd %s; cvs -z0 -q update -dP" % \
13075                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13076                         if retval != os.EX_OK:
13077                                 sys.exit(retval)
13078                 dosyncuri = syncuri
13079         else:
13080                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13081                         noiselevel=-1, level=logging.ERROR)
13082                 return 1
13083
13084         if updatecache_flg and  \
13085                 myaction != "metadata" and \
13086                 "metadata-transfer" not in settings.features:
13087                 updatecache_flg = False
13088
13089         # Reload the whole config from scratch.
13090         settings, trees, mtimedb = load_emerge_config(trees=trees)
13091         root_config = trees[settings["ROOT"]]["root_config"]
13092         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13093
13094         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13095                 action_metadata(settings, portdb, myopts)
13096
13097         if portage._global_updates(trees, mtimedb["updates"]):
13098                 mtimedb.commit()
13099                 # Reload the whole config from scratch.
13100                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13101                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13102                 root_config = trees[settings["ROOT"]]["root_config"]
13103
13104         mybestpv = portdb.xmatch("bestmatch-visible",
13105                 portage.const.PORTAGE_PACKAGE_ATOM)
13106         mypvs = portage.best(
13107                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13108                 portage.const.PORTAGE_PACKAGE_ATOM))
13109
13110         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13111
13112         if myaction != "metadata":
13113                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13114                         retval = portage.process.spawn(
13115                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13116                                 dosyncuri], env=settings.environ())
13117                         if retval != os.EX_OK:
13118                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13119
13120         if (mybestpv != mypvs) and "--quiet" not in myopts:
13121                 print
13122                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13123                 print red(" * ")+"that you update portage now, before any other packages are updated."
13124                 print
13125                 print red(" * ")+"To update portage, run 'emerge portage' now."
13126                 print
13127         
13128         display_news_notification(root_config, myopts)
13129         return os.EX_OK
13130
13131 def git_sync_timestamps(settings, portdir):
13132         """
13133         Since git doesn't preserve timestamps, synchronize timestamps between metadata
13134         cache entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13135         for a given file as long as the file in the working tree is not modified
13136         (relative to HEAD).
13137         """
13138         cache_dir = os.path.join(portdir, "metadata", "cache")
13139         if not os.path.isdir(cache_dir):
13140                 return os.EX_OK
13141         writemsg_level(">>> Synchronizing timestamps...\n")
13142
13143         from portage.cache.cache_errors import CacheError
13144         try:
13145                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13146                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13147         except CacheError, e:
13148                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13149                         level=logging.ERROR, noiselevel=-1)
13150                 return 1
13151
13152         ec_dir = os.path.join(portdir, "eclass")
13153         try:
13154                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13155                         if f.endswith(".eclass"))
13156         except OSError, e:
13157                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13158                         level=logging.ERROR, noiselevel=-1)
13159                 return 1
13160
13161         args = [portage.const.BASH_BINARY, "-c",
13162                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13163                 portage._shell_quote(portdir)]
13164         import subprocess
13165         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13166         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13167         rval = proc.wait()
13168         if rval != os.EX_OK:
13169                 return rval
13170
13171         modified_eclasses = set(ec for ec in ec_names \
13172                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13173
13174         updated_ec_mtimes = {}
13175
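        # For each cache entry, restore the recorded ebuild and eclass mtimes,
        # as long as the corresponding files are unmodified in the working tree
        # and the cache entry is internally consistent.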
13176         for cpv in cache_db:
13177                 cpv_split = portage.catpkgsplit(cpv)
13178                 if cpv_split is None:
13179                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13180                                 level=logging.ERROR, noiselevel=-1)
13181                         continue
13182
13183                 cat, pn, ver, rev = cpv_split
13184                 cat, pf = portage.catsplit(cpv)
13185                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13186                 if relative_eb_path in modified_files:
13187                         continue
13188
13189                 try:
13190                         cache_entry = cache_db[cpv]
13191                         eb_mtime = cache_entry.get("_mtime_")
13192                         ec_mtimes = cache_entry.get("_eclasses_")
13193                 except KeyError:
13194                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13195                                 level=logging.ERROR, noiselevel=-1)
13196                         continue
13197                 except CacheError, e:
13198                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13199                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13200                         continue
13201
13202                 if eb_mtime is None:
13203                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13204                                 level=logging.ERROR, noiselevel=-1)
13205                         continue
13206
13207                 try:
13208                         eb_mtime = long(eb_mtime)
13209                 except ValueError:
13210                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13211                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13212                         continue
13213
13214                 if ec_mtimes is None:
13215                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13216                                 level=logging.ERROR, noiselevel=-1)
13217                         continue
13218
13219                 if modified_eclasses.intersection(ec_mtimes):
13220                         continue
13221
13222                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13223                 if missing_eclasses:
13224                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13225                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13226                                 noiselevel=-1)
13227                         continue
13228
13229                 eb_path = os.path.join(portdir, relative_eb_path)
13230                 try:
13231                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13232                 except OSError:
13233                         writemsg_level("!!! Missing ebuild: %s\n" % \
13234                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13235                         continue
13236
13237                 inconsistent = False
13238                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13239                         updated_mtime = updated_ec_mtimes.get(ec)
13240                         if updated_mtime is not None and updated_mtime != ec_mtime:
13241                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13242                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13243                                 inconsistent = True
13244                                 break
13245
13246                 if inconsistent:
13247                         continue
13248
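                      # The ebuild itself is unchanged, so roll its on-disk mtime back to the
                      # cached value; the existing cache entry then continues to validate.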
13249                 if current_eb_mtime != eb_mtime:
13250                         os.utime(eb_path, (eb_mtime, eb_mtime))
13251
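                      # Likewise, sync the on-disk mtime of each inherited eclass back to the
                      # cached value, doing the adjustment at most once per eclass.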
13252                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13253                         if ec in updated_ec_mtimes:
13254                                 continue
13255                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13256                         current_mtime = long(os.stat(ec_path).st_mtime)
13257                         if current_mtime != ec_mtime:
13258                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13259                         updated_ec_mtimes[ec] = ec_mtime
13260
13261         return os.EX_OK
13262
13263 def action_metadata(settings, portdb, myopts):
13264         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13265         old_umask = os.umask(0002)
13266         cachedir = os.path.normpath(settings.depcachedir)
13267         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13268                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13269                                         "/sys", "/tmp", "/usr",  "/var"]:
13270                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13271                         "ROOT DIRECTORY ON YOUR SYSTEM."
13272                 print >> sys.stderr, \
13273                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13274                 sys.exit(73)
13275         if not os.path.exists(cachedir):
13276                 os.mkdir(cachedir)
13277
13278         ec = portage.eclass_cache.cache(portdb.porttree_root)
13279         myportdir = os.path.realpath(settings["PORTDIR"])
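              # Open the pregenerated metadata/cache from the rsync tree read-only; it is
              # the source cache that mirror_cache() transfers into portdb.auxdb below.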
13280         cm = settings.load_best_module("portdbapi.metadbmodule")(
13281                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13282
13283         from portage.cache import util
13284
13285         class percentage_noise_maker(util.quiet_mirroring):
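                      # Wraps the cpv stream and prints a rough percentage progress
                      # indicator on stdout while the cache is being mirrored.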
13286                 def __init__(self, dbapi):
13287                         self.dbapi = dbapi
13288                         self.cp_all = dbapi.cp_all()
13289                         l = len(self.cp_all)
13290                         self.call_update_min = 100000000
13291                         self.min_cp_all = l/100.0
13292                         self.count = 1
13293                         self.pstr = ''
13294
13295                 def __iter__(self):
13296                         for x in self.cp_all:
13297                                 self.count += 1
13298                                 if self.count > self.min_cp_all:
13299                                         self.call_update_min = 0
13300                                         self.count = 0
13301                                 for y in self.dbapi.cp_list(x):
13302                                         yield y
13303                         self.call_update_min = 0
13304
13305                 def update(self, *arg):
13306                         try:
13307                                 self.pstr = int(self.pstr) + 1
13308                         except ValueError:
13309                                 self.pstr = 1
13310                         sys.stdout.write("%s%i%%" % \
13311                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13312                         sys.stdout.flush()
13313                         self.call_update_min = 10000000
13314
13315                 def finish(self, *arg):
13316                         sys.stdout.write("\b\b\b\b100%\n")
13317                         sys.stdout.flush()
13318
13319         if "--quiet" in myopts:
13320                 def quicky_cpv_generator(cp_all_list):
13321                         for x in cp_all_list:
13322                                 for y in portdb.cp_list(x):
13323                                         yield y
13324                 source = quicky_cpv_generator(portdb.cp_all())
13325                 noise_maker = portage.cache.util.quiet_mirroring()
13326         else:
13327                 noise_maker = source = percentage_noise_maker(portdb)
13328         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13329                 eclass_cache=ec, verbose_instance=noise_maker)
13330
13331         sys.stdout.flush()
13332         os.umask(old_umask)
13333
13334 def action_regen(settings, portdb, max_jobs, max_load):
13335         xterm_titles = "notitles" not in settings.features
13336         emergelog(xterm_titles, " === regen")
13337         #regenerate cache entries
13338         portage.writemsg_stdout("Regenerating cache entries...\n")
13339         try:
13340                 os.close(sys.stdin.fileno())
13341         except SystemExit, e:
13342                 raise # Needed else can't exit
13343         except:
13344                 pass
13345         sys.stdout.flush()
13346
13347         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13348         regen.run()
13349
13350         portage.writemsg_stdout("done!\n")
13351         return regen.returncode
13352
13353 def action_config(settings, trees, myopts, myfiles):
13354         if len(myfiles) != 1:
13355                 print red("!!! config can only take a single package atom at this time\n")
13356                 sys.exit(1)
13357         if not is_valid_package_atom(myfiles[0]):
13358                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13359                         noiselevel=-1)
13360                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13361                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13362                 sys.exit(1)
13363         print
13364         try:
13365                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13366         except portage.exception.AmbiguousPackageName, e:
13367                 # Multiple matches thrown from cpv_expand
13368                 pkgs = e.args[0]
13369         if len(pkgs) == 0:
13370                 print "No packages found.\n"
13371                 sys.exit(0)
13372         elif len(pkgs) > 1:
13373                 if "--ask" in myopts:
13374                         options = []
13375                         print "Please select a package to configure:"
13376                         idx = 0
13377                         for pkg in pkgs:
13378                                 idx += 1
13379                                 options.append(str(idx))
13380                                 print options[-1]+") "+pkg
13381                         print "X) Cancel"
13382                         options.append("X")
13383                         idx = userquery("Selection?", options)
13384                         if idx == "X":
13385                                 sys.exit(0)
13386                         pkg = pkgs[int(idx)-1]
13387                 else:
13388                         print "The following packages are available:"
13389                         for pkg in pkgs:
13390                                 print "* "+pkg
13391                         print "\nPlease use a specific atom or the --ask option."
13392                         sys.exit(1)
13393         else:
13394                 pkg = pkgs[0]
13395
13396         print
13397         if "--ask" in myopts:
13398                 if userquery("Ready to configure "+pkg+"?") == "No":
13399                         sys.exit(0)
13400         else:
13401                 print "Configuring "+pkg+"..."
13402         print
13403         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13404         mysettings = portage.config(clone=settings)
13405         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13406         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13407         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13408                 mysettings,
13409                 debug=debug, cleanup=True,
13410                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13411         if retval == os.EX_OK:
13412                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13413                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13414         print
13415
13416 def action_info(settings, trees, myopts, myfiles):
13417         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13418                 settings.profile_path, settings["CHOST"],
13419                 trees[settings["ROOT"]]["vartree"].dbapi)
13420         header_width = 65
13421         header_title = "System Settings"
13422         if myfiles:
13423                 print header_width * "="
13424                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13425         print header_width * "="
13426         print "System uname: "+platform.platform(aliased=1)
13427
13428         lastSync = portage.grabfile(os.path.join(
13429                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13430         print "Timestamp of tree:",
13431         if lastSync:
13432                 print lastSync[0]
13433         else:
13434                 print "Unknown"
13435
13436         output=commands.getstatusoutput("distcc --version")
13437         if not output[0]:
13438                 print str(output[1].split("\n",1)[0]),
13439                 if "distcc" in settings.features:
13440                         print "[enabled]"
13441                 else:
13442                         print "[disabled]"
13443
13444         output=commands.getstatusoutput("ccache -V")
13445         if not output[0]:
13446                 print str(output[1].split("\n",1)[0]),
13447                 if "ccache" in settings.features:
13448                         print "[enabled]"
13449                 else:
13450                         print "[disabled]"
13451
13452         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13453                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13454         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13455         myvars  = portage.util.unique_array(myvars)
13456         myvars.sort()
13457
13458         for x in myvars:
13459                 if portage.isvalidatom(x):
13460                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13461                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13462                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13463                         pkgs = []
13464                         for pn, ver, rev in pkg_matches:
13465                                 if rev != "r0":
13466                                         pkgs.append(ver + "-" + rev)
13467                                 else:
13468                                         pkgs.append(ver)
13469                         if pkgs:
13470                                 pkgs = ", ".join(pkgs)
13471                                 print "%-20s %s" % (x+":", pkgs)
13472                 else:
13473                         print "%-20s %s" % (x+":", "[NOT VALID]")
13474
13475         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13476
13477         if "--verbose" in myopts:
13478                 myvars=settings.keys()
13479         else:
13480                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13481                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13482                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13483                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13484
13485                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13486
13487         myvars = portage.util.unique_array(myvars)
13488         unset_vars = []
13489         myvars.sort()
13490         for x in myvars:
13491                 if x in settings:
13492                         if x != "USE":
13493                                 print '%s="%s"' % (x, settings[x])
13494                         else:
13495                                 use = set(settings["USE"].split())
13496                                 use_expand = settings["USE_EXPAND"].split()
13497                                 use_expand.sort()
13498                                 for varname in use_expand:
13499                                         flag_prefix = varname.lower() + "_"
13500                                         for f in list(use):
13501                                                 if f.startswith(flag_prefix):
13502                                                         use.remove(f)
13503                                 use = list(use)
13504                                 use.sort()
13505                                 print 'USE="%s"' % " ".join(use),
13506                                 for varname in use_expand:
13507                                         myval = settings.get(varname)
13508                                         if myval:
13509                                                 print '%s="%s"' % (varname, myval),
13510                                 print
13511                 else:
13512                         unset_vars.append(x)
13513         if unset_vars:
13514                 print "Unset:  "+", ".join(unset_vars)
13515         print
13516
13517         if "--debug" in myopts:
13518                 for x in dir(portage):
13519                         module = getattr(portage, x)
13520                         if "cvs_id_string" in dir(module):
13521                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13522
13523         # See if we can find any packages installed matching the strings
13524         # passed on the command line
13525         mypkgs = []
13526         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13527         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13528         for x in myfiles:
13529                 mypkgs.extend(vardb.match(x))
13530
13531         # If some packages were found...
13532         if mypkgs:
13533                 # Get our global settings (we only print stuff if it varies from
13534                 # the current config)
13535                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13536                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13537                 global_vals = {}
13538                 pkgsettings = portage.config(clone=settings)
13539
13540                 for myvar in mydesiredvars:
13541                         global_vals[myvar] = set(settings.get(myvar, "").split())
13542
13543                 # Loop through each package
13544                 # Only print settings if they differ from global settings
13545                 header_title = "Package Settings"
13546                 print header_width * "="
13547                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13548                 print header_width * "="
13549                 from portage.output import EOutput
13550                 out = EOutput()
13551                 for pkg in mypkgs:
13552                         # Get all package specific variables
13553                         auxvalues = vardb.aux_get(pkg, auxkeys)
13554                         valuesmap = {}
13555                         for i in xrange(len(auxkeys)):
13556                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13557                         diff_values = {}
13558                         for myvar in mydesiredvars:
13559                                 # If the package variable doesn't match the
13560                                 # current global variable, something has changed
13561                                 # so record it in diff_values so we know to print it
13562                                 if valuesmap[myvar] != global_vals[myvar]:
13563                                         diff_values[myvar] = valuesmap[myvar]
13564                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13565                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13566                         pkgsettings.reset()
13567                         # If a matching ebuild is no longer available in the tree, maybe it
13568                         # would make sense to compare against the flags for the best
13569                         # available version with the same slot?
13570                         mydb = None
13571                         if portdb.cpv_exists(pkg):
13572                                 mydb = portdb
13573                         pkgsettings.setcpv(pkg, mydb=mydb)
13574                         if valuesmap["IUSE"].intersection(
13575                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13576                                 diff_values["USE"] = valuesmap["USE"]
13577                         # If a difference was found, print the info for
13578                         # this package.
13579                         if diff_values:
13580                                 # Print package info
13581                                 print "%s was built with the following:" % pkg
13582                                 for myvar in mydesiredvars + ["USE"]:
13583                                         if myvar in diff_values:
13584                                                 mylist = list(diff_values[myvar])
13585                                                 mylist.sort()
13586                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13587                                 print
13588                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13589                         ebuildpath = vardb.findname(pkg)
13590                         if not ebuildpath or not os.path.exists(ebuildpath):
13591                                 out.ewarn("No ebuild found for '%s'" % pkg)
13592                                 continue
13593                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13594                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13595                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13596                                 tree="vartree")
13597
13598 def action_search(root_config, myopts, myfiles, spinner):
13599         if not myfiles:
13600                 print "emerge: no search terms provided."
13601         else:
13602                 searchinstance = search(root_config,
13603                         spinner, "--searchdesc" in myopts,
13604                         "--quiet" not in myopts, "--usepkg" in myopts,
13605                         "--usepkgonly" in myopts)
13606                 for mysearch in myfiles:
13607                         try:
13608                                 searchinstance.execute(mysearch)
13609                         except re.error, comment:
13610                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13611                                 sys.exit(1)
13612                         searchinstance.output()
13613
13614 def action_depclean(settings, trees, ldpath_mtimes,
13615         myopts, action, myfiles, spinner):
13616         # Remove packages that are neither explicitly merged nor required as a
13617         # dependency of another package. The world file defines what counts as explicit.
13618
13619         # Global depclean or prune operations are not very safe when there are
13620         # missing dependencies since it's unknown how badly incomplete
13621         # the dependency graph is, and we might accidentally remove packages
13622         # that should have been pulled into the graph. On the other hand, it's
13623         # relatively safe to ignore missing deps when only asked to remove
13624         # specific packages.
13625         allow_missing_deps = len(myfiles) > 0
13626
13627         msg = []
13628         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13629         msg.append("mistakes. Packages that are part of the world set will always\n")
13630         msg.append("be kept.  They can be manually added to this set with\n")
13631         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13632         msg.append("package.provided (see portage(5)) will be removed by\n")
13633         msg.append("depclean, even if they are part of the world set.\n")
13634         msg.append("\n")
13635         msg.append("As a safety measure, depclean will not remove any packages\n")
13636         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13637         msg.append("consequence, it is often necessary to run %s\n" % \
13638                 good("`emerge --update"))
13639         msg.append(good("--newuse --deep @system @world`") + \
13640                 " prior to depclean.\n")
13641
13642         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13643                 portage.writemsg_stdout("\n")
13644                 for x in msg:
13645                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13646
13647         xterm_titles = "notitles" not in settings.features
13648         myroot = settings["ROOT"]
13649         root_config = trees[myroot]["root_config"]
13650         getSetAtoms = root_config.setconfig.getSetAtoms
13651         vardb = trees[myroot]["vartree"].dbapi
13652
13653         required_set_names = ("system", "world")
13654         required_sets = {}
13655         set_args = []
13656
13657         for s in required_set_names:
13658                 required_sets[s] = InternalPackageSet(
13659                         initial_atoms=getSetAtoms(s))
13660
13661
13662         # When removing packages, use a temporary version of world
13663         # which excludes packages that are intended to be eligible for
13664         # removal.
13665         world_temp_set = required_sets["world"]
13666         system_set = required_sets["system"]
13667
13668         if not system_set or not world_temp_set:
13669
13670                 if not system_set:
13671                         writemsg_level("!!! You have no system list.\n",
13672                                 level=logging.ERROR, noiselevel=-1)
13673
13674                 if not world_temp_set:
13675                         writemsg_level("!!! You have no world file.\n",
13676                                         level=logging.WARNING, noiselevel=-1)
13677
13678                 writemsg_level("!!! Proceeding is likely to " + \
13679                         "break your installation.\n",
13680                         level=logging.WARNING, noiselevel=-1)
13681                 if "--pretend" not in myopts:
13682                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13683
13684         if action == "depclean":
13685                 emergelog(xterm_titles, " >>> depclean")
13686
13687         import textwrap
13688         args_set = InternalPackageSet()
13689         if myfiles:
13690                 for x in myfiles:
13691                         if not is_valid_package_atom(x):
13692                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13693                                         level=logging.ERROR, noiselevel=-1)
13694                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13695                                 return
13696                         try:
13697                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13698                         except portage.exception.AmbiguousPackageName, e:
13699                                 msg = "The short ebuild name \"" + x + \
13700                                         "\" is ambiguous.  Please specify " + \
13701                                         "one of the following " + \
13702                                         "fully-qualified ebuild names instead:"
13703                                 for line in textwrap.wrap(msg, 70):
13704                                         writemsg_level("!!! %s\n" % (line,),
13705                                                 level=logging.ERROR, noiselevel=-1)
13706                                 for i in e[0]:
13707                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13708                                                 level=logging.ERROR, noiselevel=-1)
13709                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13710                                 return
13711                         args_set.add(atom)
13712                 matched_packages = False
13713                 for x in args_set:
13714                         if vardb.match(x):
13715                                 matched_packages = True
13716                                 break
13717                 if not matched_packages:
13718                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13719                                 action)
13720                         return
13721
13722         writemsg_level("\nCalculating dependencies  ")
13723         resolver_params = create_depgraph_params(myopts, "remove")
13724         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13725         vardb = resolver.trees[myroot]["vartree"].dbapi
13726
13727         if action == "depclean":
13728
13729                 if args_set:
13730                         # Pull in everything that's installed but not matched
13731                         # by an argument atom since we don't want to clean any
13732                         # package if something depends on it.
13733
13734                         world_temp_set.clear()
13735                         for pkg in vardb:
13736                                 spinner.update()
13737
13738                                 try:
13739                                         if args_set.findAtomForPackage(pkg) is None:
13740                                                 world_temp_set.add("=" + pkg.cpv)
13741                                                 continue
13742                                 except portage.exception.InvalidDependString, e:
13743                                         show_invalid_depstring_notice(pkg,
13744                                                 pkg.metadata["PROVIDE"], str(e))
13745                                         del e
13746                                         world_temp_set.add("=" + pkg.cpv)
13747                                         continue
13748
13749         elif action == "prune":
13750
13751                 # Pull in everything that's installed since we don't want
13752                 # to prune a package if something depends on it.
13753                 world_temp_set.clear()
13754                 world_temp_set.update(vardb.cp_all())
13755
13756                 if not args_set:
13757
13758                         # Try to prune everything that's slotted.
13759                         for cp in vardb.cp_all():
13760                                 if len(vardb.cp_list(cp)) > 1:
13761                                         args_set.add(cp)
13762
13763                 # Remove atoms from world that match installed packages
13764                 # that are also matched by argument atoms, but do not remove
13765                 # them if they match the highest installed version.
13766                 for pkg in vardb:
13767                         spinner.update()
13768                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13769                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13770                                 raise AssertionError("package expected in matches: " + \
13771                                         "cp = %s, cpv = %s matches = %s" % \
13772                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13773
13774                         highest_version = pkgs_for_cp[-1]
13775                         if pkg == highest_version:
13776                                 # pkg is the highest version
13777                                 world_temp_set.add("=" + pkg.cpv)
13778                                 continue
13779
13780                         if len(pkgs_for_cp) <= 1:
13781                                 raise AssertionError("more packages expected: " + \
13782                                         "cp = %s, cpv = %s matches = %s" % \
13783                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13784
13785                         try:
13786                                 if args_set.findAtomForPackage(pkg) is None:
13787                                         world_temp_set.add("=" + pkg.cpv)
13788                                         continue
13789                         except portage.exception.InvalidDependString, e:
13790                                 show_invalid_depstring_notice(pkg,
13791                                         pkg.metadata["PROVIDE"], str(e))
13792                                 del e
13793                                 world_temp_set.add("=" + pkg.cpv)
13794                                 continue
13795
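              # Seed the resolver with the system and world sets: they are injected as root
              # nodes and their atoms queued as dependencies, so anything reachable from
              # them is treated as required and is never selected for removal.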
13796         set_args = {}
13797         for s, package_set in required_sets.iteritems():
13798                 set_atom = SETPREFIX + s
13799                 set_arg = SetArg(arg=set_atom, set=package_set,
13800                         root_config=resolver.roots[myroot])
13801                 set_args[s] = set_arg
13802                 for atom in set_arg.set:
13803                         resolver._dep_stack.append(
13804                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13805                         resolver.digraph.add(set_arg, None)
13806
13807         success = resolver._complete_graph()
13808         writemsg_level("\b\b... done!\n")
13809
13810         resolver.display_problems()
13811
13812         if not success:
13813                 return 1
13814
13815         def unresolved_deps():
13816
13817                 unresolvable = set()
13818                 for dep in resolver._initially_unsatisfied_deps:
13819                         if isinstance(dep.parent, Package) and \
13820                                 (dep.priority > UnmergeDepPriority.SOFT):
13821                                 unresolvable.add((dep.atom, dep.parent.cpv))
13822
13823                 if not unresolvable:
13824                         return False
13825
13826                 if unresolvable and not allow_missing_deps:
13827                         prefix = bad(" * ")
13828                         msg = []
13829                         msg.append("Dependencies could not be completely resolved due to")
13830                         msg.append("the following required packages not being installed:")
13831                         msg.append("")
13832                         for atom, parent in unresolvable:
13833                                 msg.append("  %s pulled in by:" % (atom,))
13834                                 msg.append("    %s" % (parent,))
13835                                 msg.append("")
13836                         msg.append("Have you forgotten to run " + \
13837                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13838                         msg.append(("to %s? It may be necessary to manually " + \
13839                                 "uninstall packages that no longer") % action)
13840                         msg.append("exist in the portage tree since " + \
13841                                 "it may not be possible to satisfy their")
13842                         msg.append("dependencies.  Also, be aware of " + \
13843                                 "the --with-bdeps option that is documented")
13844                         msg.append("in " + good("`man emerge`") + ".")
13845                         if action == "prune":
13846                                 msg.append("")
13847                                 msg.append("If you would like to ignore " + \
13848                                         "dependencies then use %s." % good("--nodeps"))
13849                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13850                                 level=logging.ERROR, noiselevel=-1)
13851                         return True
13852                 return False
13853
13854         if unresolved_deps():
13855                 return 1
13856
13857         graph = resolver.digraph.copy()
13858         required_pkgs_total = 0
13859         for node in graph:
13860                 if isinstance(node, Package):
13861                         required_pkgs_total += 1
13862
13863         def show_parents(child_node):
13864                 parent_nodes = graph.parent_nodes(child_node)
13865                 if not parent_nodes:
13866                         # With --prune, the highest version can be pulled in without any
13867                         # real parent since all installed packages are pulled in.  In that
13868                         # case there's nothing to show here.
13869                         return
13870                 parent_strs = []
13871                 for node in parent_nodes:
13872                         parent_strs.append(str(getattr(node, "cpv", node)))
13873                 parent_strs.sort()
13874                 msg = []
13875                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13876                 for parent_str in parent_strs:
13877                         msg.append("    %s\n" % (parent_str,))
13878                 msg.append("\n")
13879                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13880
13881         def cmp_pkg_cpv(pkg1, pkg2):
13882                 """Sort Package instances by cpv."""
13883                 if pkg1.cpv > pkg2.cpv:
13884                         return 1
13885                 elif pkg1.cpv == pkg2.cpv:
13886                         return 0
13887                 else:
13888                         return -1
13889
13890         def create_cleanlist():
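                      # A package is only selected for removal if it did not end up in the
                      # dependency graph built above (and, when argument atoms were given,
                      # if it also matches one of them).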
13891                 pkgs_to_remove = []
13892
13893                 if action == "depclean":
13894                         if args_set:
13895
13896                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13897                                         arg_atom = None
13898                                         try:
13899                                                 arg_atom = args_set.findAtomForPackage(pkg)
13900                                         except portage.exception.InvalidDependString:
13901                                                 # this error has already been displayed by now
13902                                                 continue
13903
13904                                         if arg_atom:
13905                                                 if pkg not in graph:
13906                                                         pkgs_to_remove.append(pkg)
13907                                                 elif "--verbose" in myopts:
13908                                                         show_parents(pkg)
13909
13910                         else:
13911                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13912                                         if pkg not in graph:
13913                                                 pkgs_to_remove.append(pkg)
13914                                         elif "--verbose" in myopts:
13915                                                 show_parents(pkg)
13916
13917                 elif action == "prune":
13918                         # Prune pulls in all installed packages via the world set, so that
13919                         # node is not a real reverse dependency; drop it before showing parents.
13920                         graph.remove(set_args["world"])
13921
13922                         for atom in args_set:
13923                                 for pkg in vardb.match_pkgs(atom):
13924                                         if pkg not in graph:
13925                                                 pkgs_to_remove.append(pkg)
13926                                         elif "--verbose" in myopts:
13927                                                 show_parents(pkg)
13928
13929                 if not pkgs_to_remove:
13930                         writemsg_level(
13931                                 ">>> No packages selected for removal by %s\n" % action)
13932                         if "--verbose" not in myopts:
13933                                 writemsg_level(
13934                                         ">>> To see reverse dependencies, use %s\n" % \
13935                                                 good("--verbose"))
13936                         if action == "prune":
13937                                 writemsg_level(
13938                                         ">>> To ignore dependencies, use %s\n" % \
13939                                                 good("--nodeps"))
13940
13941                 return pkgs_to_remove
13942
13943         cleanlist = create_cleanlist()
13944
13945         if len(cleanlist):
13946                 clean_set = set(cleanlist)
13947
13948                 # Check if any of these package are the sole providers of libraries
13949                 # with consumers that have not been selected for removal. If so, these
13950                 # packages and any dependencies need to be added to the graph.
13951                 real_vardb = trees[myroot]["vartree"].dbapi
13952                 linkmap = real_vardb.linkmap
13953                 liblist = linkmap.listLibraryObjects()
13954                 consumer_cache = {}
13955                 provider_cache = {}
13956                 soname_cache = {}
13957                 consumer_map = {}
13958
13959                 writemsg_level(">>> Checking for lib consumers...\n")
13960
13961                 for pkg in cleanlist:
13962                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13963                         provided_libs = set()
13964
13965                         for lib in liblist:
13966                                 if pkg_dblink.isowner(lib, myroot):
13967                                         provided_libs.add(lib)
13968
13969                         if not provided_libs:
13970                                 continue
13971
13972                         consumers = {}
13973                         for lib in provided_libs:
13974                                 lib_consumers = consumer_cache.get(lib)
13975                                 if lib_consumers is None:
13976                                         lib_consumers = linkmap.findConsumers(lib)
13977                                         consumer_cache[lib] = lib_consumers
13978                                 if lib_consumers:
13979                                         consumers[lib] = lib_consumers
13980
13981                         if not consumers:
13982                                 continue
13983
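                              # Consumers owned by this same package will be removed along
                              # with it, so they do not count as potential breakage.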
13984                         for lib, lib_consumers in consumers.items():
13985                                 for consumer_file in list(lib_consumers):
13986                                         if pkg_dblink.isowner(consumer_file, myroot):
13987                                                 lib_consumers.remove(consumer_file)
13988                                 if not lib_consumers:
13989                                         del consumers[lib]
13990
13991                         if not consumers:
13992                                 continue
13993
13994                         for lib, lib_consumers in consumers.iteritems():
13995
13996                                 soname = soname_cache.get(lib)
13997                                 if soname is None:
13998                                         soname = linkmap.getSoname(lib)
13999                                         soname_cache[lib] = soname
14000
14001                                 consumer_providers = []
14002                                 for lib_consumer in lib_consumers:
14003                                         providers = provider_cache.get(lib_consumer)
14004                                         if providers is None:
14005                                                 providers = linkmap.findProviders(lib_consumer)
14006                                                 provider_cache[lib_consumer] = providers
14007                                         if soname not in providers:
14008                                                 # Why does this happen?
14009                                                 continue
14010                                         consumer_providers.append(
14011                                                 (lib_consumer, providers[soname]))
14012
14013                                 consumers[lib] = consumer_providers
14014
14015                         consumer_map[pkg] = consumers
14016
14017                 if consumer_map:
14018
14019                         search_files = set()
14020                         for consumers in consumer_map.itervalues():
14021                                 for lib, consumer_providers in consumers.iteritems():
14022                                         for lib_consumer, providers in consumer_providers:
14023                                                 search_files.add(lib_consumer)
14024                                                 search_files.update(providers)
14025
14026                         writemsg_level(">>> Assigning files to packages...\n")
14027                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14028
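                              # For each remaining consumer, check whether another installed
                              # package that is not scheduled for removal also provides the
                              # library; if so, that consumer is not considered broken.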
14029                         for pkg, consumers in consumer_map.items():
14030                                 for lib, consumer_providers in consumers.items():
14031                                         lib_consumers = set()
14032
14033                                         for lib_consumer, providers in consumer_providers:
14034                                                 owner_set = file_owners.get(lib_consumer)
14035                                                 provider_dblinks = set()
14036                                                 provider_pkgs = set()
14037
14038                                                 if len(providers) > 1:
14039                                                         for provider in providers:
14040                                                                 provider_set = file_owners.get(provider)
14041                                                                 if provider_set is not None:
14042                                                                         provider_dblinks.update(provider_set)
14043
14044                                                 if len(provider_dblinks) > 1:
14045                                                         for provider_dblink in provider_dblinks:
14046                                                                 pkg_key = ("installed", myroot,
14047                                                                         provider_dblink.mycpv, "nomerge")
14048                                                                 if pkg_key not in clean_set:
14049                                                                         provider_pkgs.add(vardb.get(pkg_key))
14050
14051                                                 if provider_pkgs:
14052                                                         continue
14053
14054                                                 if owner_set is not None:
14055                                                         lib_consumers.update(owner_set)
14056
14057                                         for consumer_dblink in list(lib_consumers):
14058                                                 if ("installed", myroot, consumer_dblink.mycpv,
14059                                                         "nomerge") in clean_set:
14060                                                         lib_consumers.remove(consumer_dblink)
14061                                                         continue
14062
14063                                         if lib_consumers:
14064                                                 consumers[lib] = lib_consumers
14065                                         else:
14066                                                 del consumers[lib]
14067                                 if not consumers:
14068                                         del consumer_map[pkg]
14069
14070                 if consumer_map:
14071                         # TODO: Implement a package set for rebuilding consumer packages.
14072
14073                         msg = "In order to avoid breakage of link level " + \
14074                                 "dependencies, one or more packages will not be removed. " + \
14075                                 "This can be solved by rebuilding " + \
14076                                 "the packages that pulled them in."
14077
14078                         prefix = bad(" * ")
14079                         from textwrap import wrap
14080                         writemsg_level("".join(prefix + "%s\n" % line for \
14081                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14082
14083                         msg = []
14084                         for pkg, consumers in consumer_map.iteritems():
14085                                 unique_consumers = set(chain(*consumers.values()))
14086                                 unique_consumers = sorted(consumer.mycpv \
14087                                         for consumer in unique_consumers)
14088                                 msg.append("")
14089                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14090                                 for consumer in unique_consumers:
14091                                         msg.append("    %s" % (consumer,))
14092                         msg.append("")
14093                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14094                                 level=logging.WARNING, noiselevel=-1)
14095
14096                         # Add lib providers to the graph as children of lib consumers,
14097                         # and also add any dependencies pulled in by the provider.
14098                         writemsg_level(">>> Adding lib providers to graph...\n")
14099
14100                         for pkg, consumers in consumer_map.iteritems():
14101                                 for consumer_dblink in set(chain(*consumers.values())):
14102                                         consumer_pkg = vardb.get(("installed", myroot,
14103                                                 consumer_dblink.mycpv, "nomerge"))
14104                                         if not resolver._add_pkg(pkg,
14105                                                 Dependency(parent=consumer_pkg,
14106                                                 priority=UnmergeDepPriority(runtime=True),
14107                                                 root=pkg.root)):
14108                                                 resolver.display_problems()
14109                                                 return 1
14110
14111                         writemsg_level("\nCalculating dependencies  ")
14112                         success = resolver._complete_graph()
14113                         writemsg_level("\b\b... done!\n")
14114                         resolver.display_problems()
14115                         if not success:
14116                                 return 1
14117                         if unresolved_deps():
14118                                 return 1
14119
14120                         graph = resolver.digraph.copy()
14121                         required_pkgs_total = 0
14122                         for node in graph:
14123                                 if isinstance(node, Package):
14124                                         required_pkgs_total += 1
14125                         cleanlist = create_cleanlist()
14126                         if not cleanlist:
14127                                 return 0
14128                         clean_set = set(cleanlist)
14129
14130                 # Use a topological sort to create an unmerge order such that
14131                 # each package is unmerged before its dependencies. This is
14132                 # necessary to avoid breaking things that may need to run
14133                 # during pkg_prerm or pkg_postrm phases.
14134
14135                 # Create a new graph to account for dependencies between the
14136                 # packages being unmerged.
14137                 graph = digraph()
14138                 del cleanlist[:]
14139
14140                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14141                 runtime = UnmergeDepPriority(runtime=True)
14142                 runtime_post = UnmergeDepPriority(runtime_post=True)
14143                 buildtime = UnmergeDepPriority(buildtime=True)
14144                 priority_map = {
14145                         "RDEPEND": runtime,
14146                         "PDEPEND": runtime_post,
14147                         "DEPEND": buildtime,
14148                 }
14149
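                      # For every package being removed, link it to each of its dependencies
                      # that is also being removed, with a priority matching the dependency
                      # type, so packages can be unmerged before their dependencies.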
14150                 for node in clean_set:
14151                         graph.add(node, None)
14152                         mydeps = []
14153                         node_use = node.metadata["USE"].split()
14154                         for dep_type in dep_keys:
14155                                 depstr = node.metadata[dep_type]
14156                                 if not depstr:
14157                                         continue
14158                                 try:
14159                                         portage.dep._dep_check_strict = False
14160                                         success, atoms = portage.dep_check(depstr, None, settings,
14161                                                 myuse=node_use, trees=resolver._graph_trees,
14162                                                 myroot=myroot)
14163                                 finally:
14164                                         portage.dep._dep_check_strict = True
14165                                 if not success:
14166                                         # Ignore invalid deps of packages that will
14167                                         # be uninstalled anyway.
14168                                         continue
14169
14170                                 priority = priority_map[dep_type]
14171                                 for atom in atoms:
14172                                         if not isinstance(atom, portage.dep.Atom):
14173                                                 # Ignore invalid atoms returned from dep_check().
14174                                                 continue
14175                                         if atom.blocker:
14176                                                 continue
14177                                         matches = vardb.match_pkgs(atom)
14178                                         if not matches:
14179                                                 continue
14180                                         for child_node in matches:
14181                                                 if child_node in clean_set:
14182                                                         graph.add(child_node, node, priority=priority)
14183
14184                 ordered = True
14185                 if len(graph.order) == len(graph.root_nodes()):
14186                         # If there are no dependencies between packages
14187                         # let unmerge() group them by cat/pn.
14188                         ordered = False
14189                         cleanlist = [pkg.cpv for pkg in graph.order]
14190                 else:
14191                         # Order nodes from lowest to highest overall reference count for
14192                         # optimal root node selection.
14193                         node_refcounts = {}
14194                         for node in graph.order:
14195                                 node_refcounts[node] = len(graph.parent_nodes(node))
14196                         def cmp_reference_count(node1, node2):
14197                                 return node_refcounts[node1] - node_refcounts[node2]
14198                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14199
14200                         ignore_priority_range = [None]
14201                         ignore_priority_range.extend(
14202                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14203                         while not graph.empty():
14204                                 for ignore_priority in ignore_priority_range:
14205                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14206                                         if nodes:
14207                                                 break
14208                                 if not nodes:
14209                                         raise AssertionError("no root nodes")
14210                                 if ignore_priority is not None:
14211                                         # Some deps have been dropped due to circular dependencies,
14212                                         # so only pop one node in order to minimize the number that
14213                                         # are dropped.
14214                                         del nodes[1:]
14215                                 for node in nodes:
14216                                         graph.remove(node)
14217                                         cleanlist.append(node.cpv)
14218
14219                 unmerge(root_config, myopts, "unmerge", cleanlist,
14220                         ldpath_mtimes, ordered=ordered)
14221
14222         if action == "prune":
14223                 return
14224
14225         if not cleanlist and "--quiet" in myopts:
14226                 return
14227
14228         print "Packages installed:   "+str(len(vardb.cpv_all()))
14229         print "Packages in world:    " + \
14230                 str(len(root_config.sets["world"].getAtoms()))
14231         print "Packages in system:   " + \
14232                 str(len(root_config.sets["system"].getAtoms()))
14233         print "Required packages:    "+str(required_pkgs_total)
14234         if "--pretend" in myopts:
14235                 print "Number to remove:     "+str(len(cleanlist))
14236         else:
14237                 print "Number removed:       "+str(len(cleanlist))
14238
14239 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14240         """
14241         Construct a depgraph for the given resume list. This will raise
14242         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14243         @rtype: tuple
14244         @returns: (success, depgraph, dropped_tasks)
14245         """
14246         skip_masked = True
14247         skip_unsatisfied = True
14248         mergelist = mtimedb["resume"]["mergelist"]
14249         dropped_tasks = set()
14250         while True:
14251                 mydepgraph = depgraph(settings, trees,
14252                         myopts, myparams, spinner)
14253                 try:
14254                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14255                                 skip_masked=skip_masked)
14256                 except depgraph.UnsatisfiedResumeDep, e:
14257                         if not skip_unsatisfied:
14258                                 raise
14259
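                              # Drop the parents of unsatisfied deps from the mergelist, and
                              # walk up the graph so that any package whose own dependencies
                              # become unsatisfied as a result is dropped as well.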
14260                         graph = mydepgraph.digraph
14261                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14262                                 for dep in e.value)
14263                         traversed_nodes = set()
14264                         unsatisfied_stack = list(unsatisfied_parents)
14265                         while unsatisfied_stack:
14266                                 pkg = unsatisfied_stack.pop()
14267                                 if pkg in traversed_nodes:
14268                                         continue
14269                                 traversed_nodes.add(pkg)
14270
14271                                 # If this package was pulled in by a parent
14272                                 # package scheduled for merge, removing this
14273                                 # package may cause the parent package's
14274                                 # dependency to become unsatisfied.
14275                                 for parent_node in graph.parent_nodes(pkg):
14276                                         if not isinstance(parent_node, Package) \
14277                                                 or parent_node.operation not in ("merge", "nomerge"):
14278                                                 continue
14279                                         unsatisfied = \
14280                                                 graph.child_nodes(parent_node,
14281                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14282                                         if pkg in unsatisfied:
14283                                                 unsatisfied_parents[parent_node] = parent_node
14284                                                 unsatisfied_stack.append(parent_node)
14285
14286                         pruned_mergelist = []
14287                         for x in mergelist:
14288                                 if isinstance(x, list) and \
14289                                         tuple(x) not in unsatisfied_parents:
14290                                         pruned_mergelist.append(x)
14291
14292                         # If the mergelist doesn't shrink then this loop is infinite.
14293                         if len(pruned_mergelist) == len(mergelist):
14294                                 # This happens if a package can't be dropped because
14295                                 # it's already installed, but it has unsatisfied PDEPEND.
14296                                 raise
14297                         mergelist[:] = pruned_mergelist
14298
14299                         # Exclude installed packages that have been removed from the graph due
14300                         # to failure to build/install runtime dependencies after the dependent
14301                         # package has already been installed.
14302                         dropped_tasks.update(pkg for pkg in \
14303                                 unsatisfied_parents if pkg.operation != "nomerge")
14304                         mydepgraph.break_refs(unsatisfied_parents)
14305
14306                         del e, graph, traversed_nodes, \
14307                                 unsatisfied_parents, unsatisfied_stack
14308                         continue
14309                 else:
14310                         break
14311         return (success, mydepgraph, dropped_tasks)
14312
14313 def action_build(settings, trees, mtimedb,
14314         myopts, myaction, myfiles, spinner):
14315
14316         # validate the state of the resume data
14317         # so that we can make assumptions later.
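              # A valid entry is a dict whose "mergelist" is a list of 4-element
              # lists, e.g. (illustrative) ["ebuild", "/", "sys-apps/foo-1.0", "merge"],
              # together with "myopts" and "favorites" entries; anything that does
              # not match is discarded below.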
14318         for k in ("resume", "resume_backup"):
14319                 if k not in mtimedb:
14320                         continue
14321                 resume_data = mtimedb[k]
14322                 if not isinstance(resume_data, dict):
14323                         del mtimedb[k]
14324                         continue
14325                 mergelist = resume_data.get("mergelist")
14326                 if not isinstance(mergelist, list):
14327                         del mtimedb[k]
14328                         continue
14329                 for x in mergelist:
14330                         if not (isinstance(x, list) and len(x) == 4):
14331                                 continue
14332                         pkg_type, pkg_root, pkg_key, pkg_action = x
14333                         if pkg_root not in trees:
14334                                 # Current $ROOT setting differs,
14335                                 # so the list must be stale.
14336                                 mergelist = None
14337                                 break
14338                 if not mergelist:
14339                         del mtimedb[k]
14340                         continue
14341                 resume_opts = resume_data.get("myopts")
14342                 if not isinstance(resume_opts, (dict, list)):
14343                         del mtimedb[k]
14344                         continue
14345                 favorites = resume_data.get("favorites")
14346                 if not isinstance(favorites, list):
14347                         del mtimedb[k]
14348                         continue
14349
14350         resume = False
14351         if "--resume" in myopts and \
14352                 ("resume" in mtimedb or
14353                 "resume_backup" in mtimedb):
14354                 resume = True
14355                 if "resume" not in mtimedb:
14356                         mtimedb["resume"] = mtimedb["resume_backup"]
14357                         del mtimedb["resume_backup"]
14358                         mtimedb.commit()
14359                 # "myopts" is a list for backward compatibility.
14360                 resume_opts = mtimedb["resume"].get("myopts", [])
14361                 if isinstance(resume_opts, list):
14362                         resume_opts = dict((k,True) for k in resume_opts)
14363                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14364                         resume_opts.pop(opt, None)
14365                 myopts.update(resume_opts)
14366
14367                 if "--debug" in myopts:
14368                         writemsg_level("myopts %s\n" % (myopts,))
14369
14370                 # Adjust config according to options of the command being resumed.
14371                 for myroot in trees:
14372                         mysettings =  trees[myroot]["vartree"].settings
14373                         mysettings.unlock()
14374                         adjust_config(myopts, mysettings)
14375                         mysettings.lock()
14376                         del myroot, mysettings
14377
14378         ldpath_mtimes = mtimedb["ldpath"]
14379         favorites=[]
14380         merge_count = 0
14381         buildpkgonly = "--buildpkgonly" in myopts
14382         pretend = "--pretend" in myopts
14383         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14384         ask = "--ask" in myopts
14385         nodeps = "--nodeps" in myopts
14386         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14387         tree = "--tree" in myopts
14388         if nodeps and tree:
14389                 tree = False
14390                 del myopts["--tree"]
14391                 portage.writemsg(colorize("WARN", " * ") + \
14392                         "--tree is broken with --nodeps. Disabling...\n")
14393         debug = "--debug" in myopts
14394         verbose = "--verbose" in myopts
14395         quiet = "--quiet" in myopts
14396         if pretend or fetchonly:
14397                 # make the mtimedb readonly
14398                 mtimedb.filename = None
14399         if '--digest' in myopts or 'digest' in settings.features:
14400                 if '--digest' in myopts:
14401                         msg = "The --digest option"
14402                 else:
14403                         msg = "The FEATURES=digest setting"
14404
14405                 msg += " can prevent corruption from being" + \
14406                         " noticed. The `repoman manifest` command is the preferred" + \
14407                         " way to generate manifests and it is capable of doing an" + \
14408                         " entire repository or category at once."
14409                 prefix = bad(" * ")
14410                 writemsg(prefix + "\n")
14411                 from textwrap import wrap
14412                 for line in wrap(msg, 72):
14413                         writemsg("%s%s\n" % (prefix, line))
14414                 writemsg(prefix + "\n")
14415
14416         if "--quiet" not in myopts and \
14417                 ("--pretend" in myopts or "--ask" in myopts or \
14418                 "--tree" in myopts or "--verbose" in myopts):
14419                 action = ""
14420                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14421                         action = "fetched"
14422                 elif "--buildpkgonly" in myopts:
14423                         action = "built"
14424                 else:
14425                         action = "merged"
14426                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14427                         print
14428                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14429                         print
14430                 else:
14431                         print
14432                         print darkgreen("These are the packages that would be %s, in order:") % action
14433                         print
14434
14435         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14436         if not show_spinner:
14437                 spinner.update = spinner.update_quiet
14438
14439         if resume:
14440                 favorites = mtimedb["resume"].get("favorites")
14441                 if not isinstance(favorites, list):
14442                         favorites = []
14443
14444                 if show_spinner:
14445                         print "Calculating dependencies  ",
14446                 myparams = create_depgraph_params(myopts, myaction)
14447
14448                 resume_data = mtimedb["resume"]
14449                 mergelist = resume_data["mergelist"]
14450                 if mergelist and "--skipfirst" in myopts:
14451                         for i, task in enumerate(mergelist):
14452                                 if isinstance(task, list) and \
14453                                         task and task[-1] == "merge":
14454                                         del mergelist[i]
14455                                         break
14456
14457                 success = False
14458                 mydepgraph = None
14459                 try:
14460                         success, mydepgraph, dropped_tasks = resume_depgraph(
14461                                 settings, trees, mtimedb, myopts, myparams, spinner)
14462                 except (portage.exception.PackageNotFound,
14463                         depgraph.UnsatisfiedResumeDep), e:
14464                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14465                                 mydepgraph = e.depgraph
14466                         if show_spinner:
14467                                 print
14468                         from textwrap import wrap
14469                         from portage.output import EOutput
14470                         out = EOutput()
14471
14472                         resume_data = mtimedb["resume"]
14473                         mergelist = resume_data.get("mergelist")
14474                         if not isinstance(mergelist, list):
14475                                 mergelist = []
14476                         if mergelist and (debug or (verbose and not quiet)):
14477                                 out.eerror("Invalid resume list:")
14478                                 out.eerror("")
14479                                 indent = "  "
14480                                 for task in mergelist:
14481                                         if isinstance(task, list):
14482                                                 out.eerror(indent + str(tuple(task)))
14483                                 out.eerror("")
14484
14485                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14486                                 out.eerror("One or more packages are either masked or " + \
14487                                         "have missing dependencies:")
14488                                 out.eerror("")
14489                                 indent = "  "
14490                                 for dep in e.value:
14491                                         if dep.atom is None:
14492                                                 out.eerror(indent + "Masked package:")
14493                                                 out.eerror(2 * indent + str(dep.parent))
14494                                                 out.eerror("")
14495                                         else:
14496                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14497                                                 out.eerror(2 * indent + str(dep.parent))
14498                                                 out.eerror("")
14499                                 msg = "The resume list contains packages " + \
14500                                         "that are either masked or have " + \
14501                                         "unsatisfied dependencies. " + \
14502                                         "Please restart/continue " + \
14503                                         "the operation manually, or use --skipfirst " + \
14504                                         "to skip the first package in the list and " + \
14505                                         "any other packages that may be " + \
14506                                         "masked or have missing dependencies."
14507                                 for line in wrap(msg, 72):
14508                                         out.eerror(line)
14509                         elif isinstance(e, portage.exception.PackageNotFound):
14510                                 out.eerror("An expected package is " + \
14511                                         "not available: %s" % str(e))
14512                                 out.eerror("")
14513                                 msg = "The resume list contains one or more " + \
14514                                         "packages that are no longer " + \
14515                                         "available. Please restart/continue " + \
14516                                         "the operation manually."
14517                                 for line in wrap(msg, 72):
14518                                         out.eerror(line)
14519                 else:
14520                         if show_spinner:
14521                                 print "\b\b... done!"
14522
14523                 if success:
14524                         if dropped_tasks:
14525                                 portage.writemsg("!!! One or more packages have been " + \
14526                                         "dropped due to\n" + \
14527                                         "!!! masking or unsatisfied dependencies:\n\n",
14528                                         noiselevel=-1)
14529                                 for task in dropped_tasks:
14530                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14531                                 portage.writemsg("\n", noiselevel=-1)
14532                         del dropped_tasks
14533                 else:
14534                         if mydepgraph is not None:
14535                                 mydepgraph.display_problems()
14536                         if not (ask or pretend):
14537                                 # delete the current list and also the backup
14538                                 # since it's probably stale too.
14539                                 for k in ("resume", "resume_backup"):
14540                                         mtimedb.pop(k, None)
14541                                 mtimedb.commit()
14542
14543                         return 1
14544         else:
14545                 if ("--resume" in myopts):
14546                         print darkgreen("emerge: It seems we have nothing to resume...")
14547                         return os.EX_OK
14548
14549                 myparams = create_depgraph_params(myopts, myaction)
14550                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14551                         print "Calculating dependencies  ",
14552                         sys.stdout.flush()
14553                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14554                 try:
14555                         retval, favorites = mydepgraph.select_files(myfiles)
14556                 except portage.exception.PackageNotFound, e:
14557                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14558                         return 1
14559                 except portage.exception.PackageSetNotFound, e:
14560                         root_config = trees[settings["ROOT"]]["root_config"]
14561                         display_missing_pkg_set(root_config, e.value)
14562                         return 1
14563                 if show_spinner:
14564                         print "\b\b... done!"
14565                 if not retval:
14566                         mydepgraph.display_problems()
14567                         return 1
14568
14569         if "--pretend" not in myopts and \
14570                 ("--ask" in myopts or "--tree" in myopts or \
14571                 "--verbose" in myopts) and \
14572                 not ("--quiet" in myopts and "--ask" not in myopts):
14573                 if "--resume" in myopts:
14574                         mymergelist = mydepgraph.altlist()
14575                         if len(mymergelist) == 0:
14576                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14577                                 return os.EX_OK
14578                         favorites = mtimedb["resume"]["favorites"]
14579                         retval = mydepgraph.display(
14580                                 mydepgraph.altlist(reversed=tree),
14581                                 favorites=favorites)
14582                         mydepgraph.display_problems()
14583                         if retval != os.EX_OK:
14584                                 return retval
14585                         prompt="Would you like to resume merging these packages?"
14586                 else:
14587                         retval = mydepgraph.display(
14588                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14589                                 favorites=favorites)
14590                         mydepgraph.display_problems()
14591                         if retval != os.EX_OK:
14592                                 return retval
14593                         mergecount=0
14594                         for x in mydepgraph.altlist():
14595                                 if isinstance(x, Package) and x.operation == "merge":
14596                                         mergecount += 1
14597
14598                         if mergecount==0:
14599                                 sets = trees[settings["ROOT"]]["root_config"].sets
14600                                 world_candidates = None
14601                                 if "--noreplace" in myopts and \
14602                                         not oneshot and favorites:
14603                                         # Sets that are not world candidates are filtered
14604                                         # out here since the favorites list needs to be
14605                                         # complete for depgraph.loadResumeCommand() to
14606                                         # operate correctly.
14607                                         world_candidates = [x for x in favorites \
14608                                                 if not (x.startswith(SETPREFIX) and \
14609                                                 not sets[x[1:]].world_candidate)]
14610                                 if "--noreplace" in myopts and \
14611                                         not oneshot and world_candidates:
14612                                         print
14613                                         for x in world_candidates:
14614                                                 print " %s %s" % (good("*"), x)
14615                                         prompt="Would you like to add these packages to your world favorites?"
14616                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14617                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14618                                 else:
14619                                         print
14620                                         print "Nothing to merge; quitting."
14621                                         print
14622                                         return os.EX_OK
14623                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14624                                 prompt="Would you like to fetch the source files for these packages?"
14625                         else:
14626                                 prompt="Would you like to merge these packages?"
14627                 print
14628                 if "--ask" in myopts and userquery(prompt) == "No":
14629                         print
14630                         print "Quitting."
14631                         print
14632                         return os.EX_OK
14633                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14634                 myopts.pop("--ask", None)
14635
14636         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14637                 if ("--resume" in myopts):
14638                         mymergelist = mydepgraph.altlist()
14639                         if len(mymergelist) == 0:
14640                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14641                                 return os.EX_OK
14642                         favorites = mtimedb["resume"]["favorites"]
14643                         retval = mydepgraph.display(
14644                                 mydepgraph.altlist(reversed=tree),
14645                                 favorites=favorites)
14646                         mydepgraph.display_problems()
14647                         if retval != os.EX_OK:
14648                                 return retval
14649                 else:
14650                         retval = mydepgraph.display(
14651                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14652                                 favorites=favorites)
14653                         mydepgraph.display_problems()
14654                         if retval != os.EX_OK:
14655                                 return retval
14656                         if "--buildpkgonly" in myopts:
14657                                 graph_copy = mydepgraph.digraph.clone()
14658                                 removed_nodes = set()
14659                                 for node in graph_copy:
14660                                         if not isinstance(node, Package) or \
14661                                                 node.operation == "nomerge":
14662                                                 removed_nodes.add(node)
14663                                 graph_copy.difference_update(removed_nodes)
14664                                 if not graph_copy.hasallzeros(ignore_priority = \
14665                                         DepPrioritySatisfiedRange.ignore_medium):
14666                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14667                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14668                                         return 1
14669         else:
14670                 if "--buildpkgonly" in myopts:
14671                         graph_copy = mydepgraph.digraph.clone()
14672                         removed_nodes = set()
14673                         for node in graph_copy:
14674                                 if not isinstance(node, Package) or \
14675                                         node.operation == "nomerge":
14676                                         removed_nodes.add(node)
14677                         graph_copy.difference_update(removed_nodes)
14678                         if not graph_copy.hasallzeros(ignore_priority = \
14679                                 DepPrioritySatisfiedRange.ignore_medium):
14680                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14681                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14682                                 return 1
14683
14684                 if ("--resume" in myopts):
14685                         favorites=mtimedb["resume"]["favorites"]
14686                         mymergelist = mydepgraph.altlist()
14687                         mydepgraph.break_refs(mymergelist)
14688                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14689                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14690                         del mydepgraph, mymergelist
14691                         clear_caches(trees)
14692
14693                         retval = mergetask.merge()
14694                         merge_count = mergetask.curval
14695                 else:
14696                         if "resume" in mtimedb and \
14697                         "mergelist" in mtimedb["resume"] and \
14698                         len(mtimedb["resume"]["mergelist"]) > 1:
14699                                 mtimedb["resume_backup"] = mtimedb["resume"]
14700                                 del mtimedb["resume"]
14701                                 mtimedb.commit()
14702                         mtimedb["resume"]={}
14703                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14704                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14705                         # a list type for options.
14706                         mtimedb["resume"]["myopts"] = myopts.copy()
14707
14708                         # Convert Atom instances to plain str.
14709                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14710
14711                         pkglist = mydepgraph.altlist()
14712                         mydepgraph.saveNomergeFavorites()
14713                         mydepgraph.break_refs(pkglist)
14714                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14715                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14716                         del mydepgraph, pkglist
14717                         clear_caches(trees)
14718
14719                         retval = mergetask.merge()
14720                         merge_count = mergetask.curval
14721
14722                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14723                         if "yes" == settings.get("AUTOCLEAN"):
14724                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14725                                 unmerge(trees[settings["ROOT"]]["root_config"],
14726                                         myopts, "clean", [],
14727                                         ldpath_mtimes, autoclean=1)
14728                         else:
14729                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14730                                         + " AUTOCLEAN is disabled.  This can cause serious"
14731                                         + " problems due to overlapping packages.\n")
14732                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14733
14734                 return retval
14735
14736 def multiple_actions(action1, action2):
14737         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14738         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14739         sys.exit(1)
14740
14741 def insert_optional_args(args):
14742         """
14743         Parse optional arguments and insert a value if one has
14744         not been provided. This is done before feeding the args
14745         to the optparse parser since that parser does not support
14746         this feature natively.
14747         """
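              # Illustrative examples of the transformation performed below
              # (not executed, shown only to document the behavior):
              #   ["-j4", "world"]      -> ["--jobs", "4", "world"]
              #   ["--jobs", "world"]   -> ["--jobs", "True", "world"]
              #   ["-vj", "world"]      -> ["--jobs", "True", "-v", "world"]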
14748
14749         new_args = []
14750         jobs_opts = ("-j", "--jobs")
14751         arg_stack = args[:]
14752         arg_stack.reverse()
14753         while arg_stack:
14754                 arg = arg_stack.pop()
14755
14756                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14757                 if not (short_job_opt or arg in jobs_opts):
14758                         new_args.append(arg)
14759                         continue
14760
14761                                 # Insert a placeholder value when none was given,
14762                                 # since optparse requires --jobs to take an argument.
14763
14764                 new_args.append("--jobs")
14765                 job_count = None
14766                 saved_opts = None
14767                 if short_job_opt and len(arg) > 2:
14768                         if arg[:2] == "-j":
14769                                 try:
14770                                         job_count = int(arg[2:])
14771                                 except ValueError:
14772                                         saved_opts = arg[2:]
14773                         else:
14774                                 job_count = "True"
14775                                 saved_opts = arg[1:].replace("j", "")
14776
14777                 if job_count is None and arg_stack:
14778                         try:
14779                                 job_count = int(arg_stack[-1])
14780                         except ValueError:
14781                                 pass
14782                         else:
14783                                 # Discard the job count from the stack
14784                                 # since we're consuming it here.
14785                                 arg_stack.pop()
14786
14787                 if job_count is None:
14788                         # unlimited number of jobs
14789                         new_args.append("True")
14790                 else:
14791                         new_args.append(str(job_count))
14792
14793                 if saved_opts is not None:
14794                         new_args.append("-" + saved_opts)
14795
14796         return new_args
14797
14798 def parse_opts(tmpcmdline, silent=False):
14799         myaction=None
14800         myopts = {}
14801         myfiles=[]
14802
14803         global actions, options, shortmapping
14804
14805         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14806         argument_options = {
14807                 "--config-root": {
14808                         "help":"specify the location for portage configuration files",
14809                         "action":"store"
14810                 },
14811                 "--color": {
14812                         "help":"enable or disable color output",
14813                         "type":"choice",
14814                         "choices":("y", "n")
14815                 },
14816
14817                 "--jobs": {
14818
14819                         "help"   : "Specifies the number of packages to build " + \
14820                                 "simultaneously.",
14821
14822                         "action" : "store"
14823                 },
14824
14825                 "--load-average": {
14826
14827                         "help"   :"Specifies that no new builds should be started " + \
14828                                 "if there are other builds running and the load average " + \
14829                                 "is at least LOAD (a floating-point number).",
14830
14831                         "action" : "store"
14832                 },
14833
14834                 "--with-bdeps": {
14835                         "help":"include unnecessary build time dependencies",
14836                         "type":"choice",
14837                         "choices":("y", "n")
14838                 },
14839                 "--reinstall": {
14840                         "help":"specify conditions to trigger package reinstallation",
14841                         "type":"choice",
14842                         "choices":["changed-use"]
14843                 },
14844                 "--root": {
14845                  "help"   : "specify the target root filesystem for merging packages",
14846                  "action" : "store"
14847                 },
14848         }
14849
14850         from optparse import OptionParser
14851         parser = OptionParser()
14852         if parser.has_option("--help"):
14853                 parser.remove_option("--help")
14854
14855         for action_opt in actions:
14856                 parser.add_option("--" + action_opt, action="store_true",
14857                         dest=action_opt.replace("-", "_"), default=False)
14858         for myopt in options:
14859                 parser.add_option(myopt, action="store_true",
14860                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14861         for shortopt, longopt in shortmapping.iteritems():
14862                 parser.add_option("-" + shortopt, action="store_true",
14863                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14864         for myalias, myopt in longopt_aliases.iteritems():
14865                 parser.add_option(myalias, action="store_true",
14866                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14867
14868         for myopt, kwargs in argument_options.iteritems():
14869                 parser.add_option(myopt,
14870                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14871
14872         tmpcmdline = insert_optional_args(tmpcmdline)
14873
14874         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14875
14876         if myoptions.jobs:
14877                 jobs = None
14878                 if myoptions.jobs == "True":
14879                         jobs = True
14880                 else:
14881                         try:
14882                                 jobs = int(myoptions.jobs)
14883                         except ValueError:
14884                                 jobs = -1
14885
14886                 if jobs is not True and \
14887                         jobs < 1:
14888                         jobs = None
14889                         if not silent:
14890                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14891                                         (myoptions.jobs,), noiselevel=-1)
14892
14893                 myoptions.jobs = jobs
14894
14895         if myoptions.load_average:
14896                 try:
14897                         load_average = float(myoptions.load_average)
14898                 except ValueError:
14899                         load_average = 0.0
14900
14901                 if load_average <= 0.0:
14902                         load_average = None
14903                         if not silent:
14904                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14905                                         (myoptions.load_average,), noiselevel=-1)
14906
14907                 myoptions.load_average = load_average
14908
14909         for myopt in options:
14910                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14911                 if v:
14912                         myopts[myopt] = True
14913
14914         for myopt in argument_options:
14915                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14916                 if v is not None:
14917                         myopts[myopt] = v
14918
14919         if myoptions.searchdesc:
14920                 myoptions.search = True
14921
14922         for action_opt in actions:
14923                 v = getattr(myoptions, action_opt.replace("-", "_"))
14924                 if v:
14925                         if myaction:
14926                                 multiple_actions(myaction, action_opt)
14927                                 sys.exit(1)
14928                         myaction = action_opt
14929
14930         myfiles += myargs
14931
14932         return myaction, myopts, myfiles
14933
14934 def validate_ebuild_environment(trees):
14935         for myroot in trees:
14936                 settings = trees[myroot]["vartree"].settings
14937                 settings.validate()
14938
14939 def clear_caches(trees):
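              # Drop cached metadata from the port/bin/var trees and the dircache,
              # then force a garbage collection pass. This is called above once the
              # depgraph has been broken down, just before the Scheduler starts merging.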
14940         for d in trees.itervalues():
14941                 d["porttree"].dbapi.melt()
14942                 d["porttree"].dbapi._aux_cache.clear()
14943                 d["bintree"].dbapi._aux_cache.clear()
14944                 d["bintree"].dbapi._clear_cache()
14945                 d["vartree"].dbapi.linkmap._clear_cache()
14946         portage.dircache.clear()
14947         gc.collect()
14948
14949 def load_emerge_config(trees=None):
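              # Build (settings, trees, mtimedb), honoring PORTAGE_CONFIGROOT and ROOT
              # from the environment; the returned settings come from the vartree of
              # the target ROOT (the first non-"/" root, if any).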
14950         kwargs = {}
14951         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14952                 v = os.environ.get(envvar, None)
14953                 if v and v.strip():
14954                         kwargs[k] = v
14955         trees = portage.create_trees(trees=trees, **kwargs)
14956
14957         for root, root_trees in trees.iteritems():
14958                 settings = root_trees["vartree"].settings
14959                 setconfig = load_default_config(settings, root_trees)
14960                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14961
14962         settings = trees["/"]["vartree"].settings
14963
14964         for myroot in trees:
14965                 if myroot != "/":
14966                         settings = trees[myroot]["vartree"].settings
14967                         break
14968
14969         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14970         mtimedb = portage.MtimeDB(mtimedbfile)
14971         
14972         return settings, trees, mtimedb
14973
14974 def adjust_config(myopts, settings):
14975         """Make emerge specific adjustments to the config."""
14976
14977         # To enhance usability, make some vars case insensitive by forcing them to
14978         # lower case.
14979         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14980                 if myvar in settings:
14981                         settings[myvar] = settings[myvar].lower()
14982                         settings.backup_changes(myvar)
14983         del myvar
14984
14985         # Kill noauto as it will break merges otherwise.
14986         if "noauto" in settings.features:
14987                 settings.features.remove('noauto')
14988                 settings['FEATURES'] = ' '.join(sorted(settings.features))
14989                 settings.backup_changes("FEATURES")
14990
14991         CLEAN_DELAY = 5
14992         try:
14993                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14994         except ValueError, e:
14995                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14996                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14997                         settings["CLEAN_DELAY"], noiselevel=-1)
14998         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14999         settings.backup_changes("CLEAN_DELAY")
15000
15001         EMERGE_WARNING_DELAY = 10
15002         try:
15003                 EMERGE_WARNING_DELAY = int(settings.get(
15004                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15005         except ValueError, e:
15006                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15007                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15008                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15009         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15010         settings.backup_changes("EMERGE_WARNING_DELAY")
15011
15012         if "--quiet" in myopts:
15013                 settings["PORTAGE_QUIET"]="1"
15014                 settings.backup_changes("PORTAGE_QUIET")
15015
15016         if "--verbose" in myopts:
15017                 settings["PORTAGE_VERBOSE"] = "1"
15018                 settings.backup_changes("PORTAGE_VERBOSE")
15019
15020         # Set so that configs will be merged regardless of remembered status
15021         if ("--noconfmem" in myopts):
15022                 settings["NOCONFMEM"]="1"
15023                 settings.backup_changes("NOCONFMEM")
15024
15025         # Set various debug markers... They should be merged somehow.
15026         PORTAGE_DEBUG = 0
15027         try:
15028                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15029                 if PORTAGE_DEBUG not in (0, 1):
15030                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15031                                 PORTAGE_DEBUG, noiselevel=-1)
15032                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15033                                 noiselevel=-1)
15034                         PORTAGE_DEBUG = 0
15035         except ValueError, e:
15036                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15037                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15038                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15039                 del e
15040         if "--debug" in myopts:
15041                 PORTAGE_DEBUG = 1
15042         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15043         settings.backup_changes("PORTAGE_DEBUG")
15044
15045         if settings.get("NOCOLOR") not in ("yes","true"):
15046                 portage.output.havecolor = 1
15047
15048         """The explicit --color < y | n > option overrides the NOCOLOR environment
15049         variable and stdout auto-detection."""
15050         if "--color" in myopts:
15051                 if "y" == myopts["--color"]:
15052                         portage.output.havecolor = 1
15053                         settings["NOCOLOR"] = "false"
15054                 else:
15055                         portage.output.havecolor = 0
15056                         settings["NOCOLOR"] = "true"
15057                 settings.backup_changes("NOCOLOR")
15058         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15059                 portage.output.havecolor = 0
15060                 settings["NOCOLOR"] = "true"
15061                 settings.backup_changes("NOCOLOR")
15062
15063 def apply_priorities(settings):
15064         ionice(settings)
15065         nice(settings)
15066
15067 def nice(settings):
15068         try:
15069                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15070         except (OSError, ValueError), e:
15071                 out = portage.output.EOutput()
15072                 out.eerror("Failed to change nice value to '%s'" % \
15073                         settings["PORTAGE_NICENESS"])
15074                 out.eerror("%s\n" % str(e))
15075
15076 def ionice(settings):
15077
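              # PORTAGE_IONICE_COMMAND is expected to contain a ${PID} placeholder which
              # is expanded to the current process id before the command is spawned,
              # e.g. (illustrative make.conf entry):
              #   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"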
15078         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15079         if ionice_cmd:
15080                 ionice_cmd = shlex.split(ionice_cmd)
15081         if not ionice_cmd:
15082                 return
15083
15084         from portage.util import varexpand
15085         variables = {"PID" : str(os.getpid())}
15086         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15087
15088         try:
15089                 rval = portage.process.spawn(cmd, env=os.environ)
15090         except portage.exception.CommandNotFound:
15091                 # The ionice command is not available,
15092                 # so return silently.
15093                 return
15094
15095         if rval != os.EX_OK:
15096                 out = portage.output.EOutput()
15097                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15098                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15099
15100 def display_missing_pkg_set(root_config, set_name):
15101
15102         msg = []
15103         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15104                 "The following sets exist:") % \
15105                 colorize("INFORM", set_name))
15106         msg.append("")
15107
15108         for s in sorted(root_config.sets):
15109                 msg.append("    %s" % s)
15110         msg.append("")
15111
15112         writemsg_level("".join("%s\n" % l for l in msg),
15113                 level=logging.ERROR, noiselevel=-1)
15114
15115 def expand_set_arguments(myfiles, myaction, root_config):
15116         retval = os.EX_OK
15117         setconfig = root_config.setconfig
15118
15119         sets = setconfig.getSets()
15120
15121         # In order to know exactly which atoms/sets should be added to the
15122         # world file, the depgraph performs set expansion later. It will get
15123         # confused about where the atoms came from if it's not allowed to
15124         # expand them itself.
15125         do_not_expand = (None, )
15126         newargs = []
15127         for a in myfiles:
15128                 if a in ("system", "world"):
15129                         newargs.append(SETPREFIX+a)
15130                 else:
15131                         newargs.append(a)
15132         myfiles = newargs
15133         del newargs
15134         newargs = []
15135
15136         # separators for set arguments
15137         ARG_START = "{"
15138         ARG_END = "}"
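              # Illustrative example of the set argument syntax parsed below:
              #   @<setname>{key=value,flag} (where "@" is SETPREFIX) passes
              #   {"key": "value", "flag": "True"} to setconfig.update() for
              #   <setname> before the set itself is expanded.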
15139
15140         # WARNING: all operators must be of equal length
15141         IS_OPERATOR = "/@"
15142         DIFF_OPERATOR = "-@"
15143         UNION_OPERATOR = "+@"
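              # Illustrative examples of set expressions on the command line,
              # evaluated strictly left-to-right further below:
              #   @world-@system   -> atoms in world that are not in system
              #   @set1/@set2      -> atoms common to both sets (intersection)
              #   @set1+@set2      -> atoms in either set (union)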
15144         
15145         for i in range(0, len(myfiles)):
15146                 if myfiles[i].startswith(SETPREFIX):
15147                         start = 0
15148                         end = 0
15149                         x = myfiles[i][len(SETPREFIX):]
15150                         newset = ""
15151                         while x:
15152                                 start = x.find(ARG_START)
15153                                 end = x.find(ARG_END)
15154                                 if start > 0 and start < end:
15155                                         namepart = x[:start]
15156                                         argpart = x[start+1:end]
15157                                 
15158                                         # TODO: implement proper quoting
15159                                         args = argpart.split(",")
15160                                         options = {}
15161                                         for a in args:
15162                                                 if "=" in a:
15163                                                         k, v  = a.split("=", 1)
15164                                                         options[k] = v
15165                                                 else:
15166                                                         options[a] = "True"
15167                                         setconfig.update(namepart, options)
15168                                         newset += (x[:start-len(namepart)]+namepart)
15169                                         x = x[end+len(ARG_END):]
15170                                 else:
15171                                         newset += x
15172                                         x = ""
15173                         myfiles[i] = SETPREFIX+newset
15174                                 
15175         sets = setconfig.getSets()
15176
15177         # display errors that occurred while loading the SetConfig instance
15178         for e in setconfig.errors:
15179                 print colorize("BAD", "Error during set creation: %s" % e)
15180         
15181         # emerge relies on the existence of sets with names "world" and "system"
15182         required_sets = ("world", "system")
15183         missing_sets = []
15184
15185         for s in required_sets:
15186                 if s not in sets:
15187                         missing_sets.append(s)
15188         if missing_sets:
15189                 if len(missing_sets) > 2:
15190                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15191                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15192                 elif len(missing_sets) == 2:
15193                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15194                 else:
15195                         missing_sets_str = '"%s"' % missing_sets[-1]
15196                 msg = ["emerge: incomplete set configuration, " + \
15197                         "missing set(s): %s" % missing_sets_str]
15198                 if sets:
15199                         msg.append("        sets defined: %s" % ", ".join(sets))
15200                 msg.append("        This usually means that '%s'" % \
15201                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15202                 msg.append("        is missing or corrupt.")
15203                 for line in msg:
15204                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15205                 return (None, 1)
15206         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15207
15208         for a in myfiles:
15209                 if a.startswith(SETPREFIX):
15210                         # support simple set operations (intersection, difference and union)
15211                         # on the commandline. Expressions are evaluated strictly left-to-right
15212                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15213                                 expression = a[len(SETPREFIX):]
15214                                 expr_sets = []
15215                                 expr_ops = []
15216                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15217                                         is_pos = expression.rfind(IS_OPERATOR)
15218                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15219                                         union_pos = expression.rfind(UNION_OPERATOR)
15220                                         op_pos = max(is_pos, diff_pos, union_pos)
15221                                         s1 = expression[:op_pos]
15222                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15223                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15224                                         if s2 not in sets:
15225                                                 display_missing_pkg_set(root_config, s2)
15226                                                 return (None, 1)
15227                                         expr_sets.insert(0, s2)
15228                                         expr_ops.insert(0, op)
15229                                         expression = s1
15230                                 if expression not in sets:
15231                                         display_missing_pkg_set(root_config, expression)
15232                                         return (None, 1)
15233                                 expr_sets.insert(0, expression)
15234                                 result = set(setconfig.getSetAtoms(expression))
15235                                 for i in range(0, len(expr_ops)):
15236                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15237                                         if expr_ops[i] == IS_OPERATOR:
15238                                                 result.intersection_update(s2)
15239                                         elif expr_ops[i] == DIFF_OPERATOR:
15240                                                 result.difference_update(s2)
15241                                         elif expr_ops[i] == UNION_OPERATOR:
15242                                                 result.update(s2)
15243                                         else:
15244                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15245                                 newargs.extend(result)
15246                         else:                   
15247                                 s = a[len(SETPREFIX):]
15248                                 if s not in sets:
15249                                         display_missing_pkg_set(root_config, s)
15250                                         return (None, 1)
15251                                 setconfig.active.append(s)
15252                                 try:
15253                                         set_atoms = setconfig.getSetAtoms(s)
15254                                 except portage.exception.PackageSetNotFound, e:
15255                                         writemsg_level(("emerge: the given set '%s' " + \
15256                                                 "contains a non-existent set named '%s'.\n") % \
15257                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15258                                         return (None, 1)
15259                                 if myaction in unmerge_actions and \
15260                                                 not sets[s].supportsOperation("unmerge"):
15261                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15262                                                 "not support unmerge operations\n")
15263                                         retval = 1
15264                                 elif not set_atoms:
15265                                         print "emerge: '%s' is an empty set" % s
15266                                 elif myaction not in do_not_expand:
15267                                         newargs.extend(set_atoms)
15268                                 else:
15269                                         newargs.append(SETPREFIX+s)
15270                                 for e in sets[s].errors:
15271                                         print e
15272                 else:
15273                         newargs.append(a)
15274         return (newargs, retval)
15275
15276 def repo_name_check(trees):
15277         missing_repo_names = set()
15278         for root, root_trees in trees.iteritems():
15279                 if "porttree" in root_trees:
15280                         portdb = root_trees["porttree"].dbapi
15281                         missing_repo_names.update(portdb.porttrees)
15282                         repos = portdb.getRepositories()
15283                         for r in repos:
15284                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15285                         if portdb.porttree_root in missing_repo_names and \
15286                                 not os.path.exists(os.path.join(
15287                                 portdb.porttree_root, "profiles")):
15288                                 # This is normal if $PORTDIR happens to be empty,
15289                                 # so don't warn about it.
15290                                 missing_repo_names.remove(portdb.porttree_root)
15291
15292         if missing_repo_names:
15293                 msg = []
15294                 msg.append("WARNING: One or more repositories " + \
15295                         "have missing repo_name entries:")
15296                 msg.append("")
15297                 for p in missing_repo_names:
15298                         msg.append("\t%s/profiles/repo_name" % (p,))
15299                 msg.append("")
15300                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15301                         "should be a plain text file containing a unique " + \
15302                         "name for the repository on the first line.", 70))
15303                 writemsg_level("".join("%s\n" % l for l in msg),
15304                         level=logging.WARNING, noiselevel=-1)
15305
15306         return bool(missing_repo_names)
15307
15308 def config_protect_check(trees):
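              """Warn when CONFIG_PROTECT is unset or empty for a configured root."""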
15309         for root, root_trees in trees.iteritems():
15310                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15311                         msg = "!!! CONFIG_PROTECT is empty"
15312                         if root != "/":
15313                                 msg += " for '%s'" % root
15314                         writemsg_level(msg + "\n", level=logging.WARNING, noiselevel=-1)
15315
15316 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
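              """Tell the user that a short package name matched several packages,
              listing the fully-qualified names to choose from."""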
15317
15318         if "--quiet" in myopts:
15319                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15320                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15321                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15322                         print "    " + colorize("INFORM", cp)
15323                 return
15324
15325         s = search(root_config, spinner, "--searchdesc" in myopts,
15326                 "--quiet" not in myopts, "--usepkg" in myopts,
15327                 "--usepkgonly" in myopts)
15328         null_cp = portage.dep_getkey(insert_category_into_atom(
15329                 arg, "null"))
15330         cat, atom_pn = portage.catsplit(null_cp)
15331         s.searchkey = atom_pn
15332         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15333                 s.addCP(cp)
15334         s.output()
15335         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15336         print "!!! one of the above fully-qualified ebuild names instead.\n"
15337
15338 def profile_check(trees, myaction, myopts):
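              """Return os.EX_OK if a valid profile is available or the requested
              action does not require one; otherwise print an error and return 1."""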
15339         if myaction in ("info", "sync"):
15340                 return os.EX_OK
15341         elif "--version" in myopts or "--help" in myopts:
15342                 return os.EX_OK
15343         for root, root_trees in trees.iteritems():
15344                 if root_trees["root_config"].settings.profiles:
15345                         continue
15346                 # generate some profile related warning messages
15347                 validate_ebuild_environment(trees)
15348                 msg = "If you have just changed your profile configuration, you " + \
15349                         "should revert back to the previous configuration. Due to " + \
15350                         "your current profile being invalid, allowed actions are " + \
15351                         "limited to --help, --info, --sync, and --version."
15352                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15353                         level=logging.ERROR, noiselevel=-1)
15354                 return 1
15355         return os.EX_OK
15356
15357 def emerge_main():
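              """Entry point for the emerge command: parse options, load the
              portage configuration, and dispatch to the requested action."""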
15358         global portage  # NFC why this is necessary now - genone
15359         portage._disable_legacy_globals()
15360         # Disable color until we're sure that it should be enabled (after
15361         # EMERGE_DEFAULT_OPTS has been parsed).
15362         portage.output.havecolor = 0
15363         # This first pass is just for options that need to be known as early as
15364         # possible, such as --config-root.  They will be parsed again later,
15365         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15366         # the value of --config-root).
15367         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15368         if "--debug" in myopts:
15369                 os.environ["PORTAGE_DEBUG"] = "1"
15370         if "--config-root" in myopts:
15371                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15372         if "--root" in myopts:
15373                 os.environ["ROOT"] = myopts["--root"]
15374
15375         # Portage needs to ensure a sane umask for the files it creates.
15376         os.umask(022)
15377         settings, trees, mtimedb = load_emerge_config()
15378         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15379         rval = profile_check(trees, myaction, myopts)
15380         if rval != os.EX_OK:
15381                 return rval
15382
15383         if portage._global_updates(trees, mtimedb["updates"]):
15384                 mtimedb.commit()
15385                 # Reload the whole config from scratch.
15386                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15387                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15388
15389         xterm_titles = "notitles" not in settings.features
15390
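              # Re-parse the command line, this time with EMERGE_DEFAULT_OPTS
              # prepended (unless --ignore-default-opts was given).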
15391         tmpcmdline = []
15392         if "--ignore-default-opts" not in myopts:
15393                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15394         tmpcmdline.extend(sys.argv[1:])
15395         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15396
15397         if "--digest" in myopts:
15398                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15399                 # Reload the whole config from scratch so that the portdbapi internal
15400                 # config is updated with new FEATURES.
15401                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15402                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15403
15404         for myroot in trees:
15405                 mysettings = trees[myroot]["vartree"].settings
15406                 mysettings.unlock()
15407                 adjust_config(myopts, mysettings)
15408                 if '--pretend' not in myopts and myaction in \
15409                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15410                         mysettings["PORTAGE_COUNTER_HASH"] = \
15411                                 trees[myroot]["vartree"].dbapi._counter_hash()
15412                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15413                 mysettings.lock()
15414                 del myroot, mysettings
15415
15416         apply_priorities(settings)
15417
15418         spinner = stdout_spinner()
15419         if "candy" in settings.features:
15420                 spinner.update = spinner.update_scroll
15421
15422         if "--quiet" not in myopts:
15423                 portage.deprecated_profile_check(settings=settings)
15424                 repo_name_check(trees)
15425                 config_protect_check(trees)
15426
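              # Collect eclasses from overlays that override eclasses in PORTDIR,
              # so that a warning can be printed below.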
15427         eclasses_overridden = {}
15428         for mytrees in trees.itervalues():
15429                 mydb = mytrees["porttree"].dbapi
15430                 # Freeze the portdbapi for performance (memoize all xmatch results).
15431                 mydb.freeze()
15432                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15433         del mytrees, mydb
15434
15435         if eclasses_overridden and \
15436                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15437                 prefix = bad(" * ")
15438                 if len(eclasses_overridden) == 1:
15439                         writemsg(prefix + "Overlay eclass overrides " + \
15440                                 "eclass from PORTDIR:\n", noiselevel=-1)
15441                 else:
15442                         writemsg(prefix + "Overlay eclasses override " + \
15443                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15444                 writemsg(prefix + "\n", noiselevel=-1)
15445                 for eclass_name in sorted(eclasses_overridden):
15446                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15447                                 (eclasses_overridden[eclass_name], eclass_name),
15448                                 noiselevel=-1)
15449                 writemsg(prefix + "\n", noiselevel=-1)
15450                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15451                 "because it will trigger invalidation of cached ebuild metadata " + \
15452                 "that is distributed with the portage tree. If you must " + \
15453                 "override eclasses from PORTDIR then you are advised to add " + \
15454                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15455                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15456                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15457                 "you would like to disable this warning."
15458                 from textwrap import wrap
15459                 for line in wrap(msg, 72):
15460                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15461
15462         if "moo" in myfiles:
15463                 print """
15464
15465   Larry loves Gentoo (""" + platform.system() + """)
15466
15467  _______________________
15468 < Have you mooed today? >
15469  -----------------------
15470         \   ^__^
15471          \  (oo)\_______
15472             (__)\       )\/\ 
15473                 ||----w |
15474                 ||     ||
15475
15476 """
15477
15478         for x in myfiles:
15479                 ext = os.path.splitext(x)[1]
15480                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15481                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15482                         break
15483
15484         root_config = trees[settings["ROOT"]]["root_config"]
15485         if myaction == "list-sets":
15486                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15487                 sys.stdout.flush()
15488                 return os.EX_OK
15489
15490         # only expand sets for actions taking package arguments
15491         oldargs = myfiles[:]
15492         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15493                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15494                 if retval != os.EX_OK:
15495                         return retval
15496
15497                 # Need to handle empty sets specially, otherwise emerge would
15498                 # react with the help message for an empty argument list.
15499                 if oldargs and not myfiles:
15500                         print "emerge: no targets left after set expansion"
15501                         return 0
15502
15503         if ("--tree" in myopts) and ("--columns" in myopts):
15504                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15505                 return 1
15506
15507         if ("--quiet" in myopts):
15508                 spinner.update = spinner.update_quiet
15509                 portage.util.noiselimit = -1
15510
15511         # Always create packages if FEATURES=buildpkg
15512         # Imply --buildpkg if --buildpkgonly
15513         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15514                 if "--buildpkg" not in myopts:
15515                         myopts["--buildpkg"] = True
15516
15517         # Always try to fetch binary packages if FEATURES=getbinpkg
15518         if ("getbinpkg" in settings.features):
15519                 myopts["--getbinpkg"] = True
15520
15521         if "--buildpkgonly" in myopts:
15522                 # --buildpkgonly will not merge anything, so
15523                 # it cancels all binary package options.
15524                 for opt in ("--getbinpkg", "--getbinpkgonly",
15525                         "--usepkg", "--usepkgonly"):
15526                         myopts.pop(opt, None)
15527
15528         if "--fetch-all-uri" in myopts:
15529                 myopts["--fetchonly"] = True
15530
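              # --skipfirst only makes sense for a resumed merge list, so imply --resume.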
15531         if "--skipfirst" in myopts and "--resume" not in myopts:
15532                 myopts["--resume"] = True
15533
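              # The more specific binary package options imply the general ones
              # (--getbinpkgonly implies --usepkgonly and --getbinpkg, which in
              # turn imply --usepkg).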
15534         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15535                 myopts["--usepkgonly"] = True
15536
15537         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15538                 myopts["--getbinpkg"] = True
15539
15540         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15541                 myopts["--usepkg"] = True
15542
15543         # Also allow -K to apply --usepkg/-k
15544         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15545                 myopts["--usepkg"] = True
15546
15547         # Allow -p to remove --ask
15548         if ("--pretend" in myopts) and ("--ask" in myopts):
15549                 print ">>> --pretend disables --ask... removing --ask from options."
15550                 del myopts["--ask"]
15551
15552         # forbid --ask when not in a terminal
15553         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15554         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15555                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15556                         noiselevel=-1)
15557                 return 1
15558
15559         if settings.get("PORTAGE_DEBUG", "") == "1":
15560                 spinner.update = spinner.update_quiet
15561                 portage.debug=1
15562                 if "python-trace" in settings.features:
15563                         import portage.debug
15564                         portage.debug.set_trace(True)
15565
15566         if not ("--quiet" in myopts):
15567                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15568                         spinner.update = spinner.update_basic
15569
15570         if myaction == 'version':
15571                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15572                         settings.profile_path, settings["CHOST"],
15573                         trees[settings["ROOT"]]["vartree"].dbapi)
15574                 return 0
15575         elif "--help" in myopts:
15576                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15577                 return 0
15578
15579         if "--debug" in myopts:
15580                 print "myaction", myaction
15581                 print "myopts", myopts
15582
15583         if not myaction and not myfiles and "--resume" not in myopts:
15584                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15585                 return 1
15586
15587         pretend = "--pretend" in myopts
15588         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15589         buildpkgonly = "--buildpkgonly" in myopts
15590
15591         # Check that the current user has root (or portage group) access for actions that require it.
15592         if portage.secpass < 2:
15593                 # We've already allowed "--version" and "--help" above.
15594                 if "--pretend" not in myopts and myaction not in ("search","info"):
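                              # Superuser access is not needed for fetch-only runs,
                              # buildpkg-only with portage group access, metadata/regen,
                              # or a sync into a writable PORTDIR.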
15595                         need_superuser = not \
15596                                 (fetchonly or \
15597                                 (buildpkgonly and secpass >= 1) or \
15598                                 myaction in ("metadata", "regen") or \
15599                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15600                         if portage.secpass < 1 or \
15601                                 need_superuser:
15602                                 if need_superuser:
15603                                         access_desc = "superuser"
15604                                 else:
15605                                         access_desc = "portage group"
15606                                 # Always show portage_group_warning() when only portage group
15607                                 # access is required but the user is not in the portage group.
15608                                 from portage.data import portage_group_warning
15609                                 if "--ask" in myopts:
15610                                         myopts["--pretend"] = True
15611                                         del myopts["--ask"]
15612                                         print ("%s access is required... " + \
15613                                                 "adding --pretend to options.\n") % access_desc
15614                                         if portage.secpass < 1 and not need_superuser:
15615                                                 portage_group_warning()
15616                                 else:
15617                                         sys.stderr.write(("emerge: %s access is " + \
15618                                                 "required.\n\n") % access_desc)
15619                                         if portage.secpass < 1 and not need_superuser:
15620                                                 portage_group_warning()
15621                                         return 1
15622
15623         disable_emergelog = False
15624         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15625                 if x in myopts:
15626                         disable_emergelog = True
15627                         break
15628         if myaction in ("search", "info"):
15629                 disable_emergelog = True
15630         if disable_emergelog:
15631                 """ Disable emergelog for everything except build or unmerge
15632                 operations.  This helps minimize parallel emerge.log entries that can
15633                 confuse log parsers.  We especially want it disabled during
15634                 parallel-fetch, which uses --resume --fetchonly."""
15635                 global emergelog
15636                 def emergelog(*pargs, **kargs):
15637                         pass
15638
15639         if not "--pretend" in myopts:
15640                 emergelog(xterm_titles, "Started emerge on: "+\
15641                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15642                 myelogstr=""
15643                 if myopts:
15644                         myelogstr=" ".join(myopts)
15645                 if myaction:
15646                         myelogstr+=" "+myaction
15647                 if myfiles:
15648                         myelogstr += " " + " ".join(oldargs)
15649                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15650         del oldargs
15651
15652         def emergeexitsig(signum, frame):
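                      # Ignore further SIGINT/SIGTERM during shutdown, then exit
                      # with an exit code of 100 + the signal number.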
15653                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15654                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15655                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15656                 sys.exit(100+signum)
15657         signal.signal(signal.SIGINT, emergeexitsig)
15658         signal.signal(signal.SIGTERM, emergeexitsig)
15659
15660         def emergeexit():
15661                 """This gets our final log message in before we quit."""
15662                 if "--pretend" not in myopts:
15663                         emergelog(xterm_titles, " *** terminating.")
15664                 if "notitles" not in settings.features:
15665                         xtermTitleReset()
15666         portage.atexit_register(emergeexit)
15667
15668         if myaction in ("config", "metadata", "regen", "sync"):
15669                 if "--pretend" in myopts:
15670                         sys.stderr.write(("emerge: The '%s' action does " + \
15671                                 "not support '--pretend'.\n") % myaction)
15672                         return 1
15673
15674         if "sync" == myaction:
15675                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15676         elif "metadata" == myaction:
15677                 action_metadata(settings, portdb, myopts)
15678         elif myaction=="regen":
15679                 validate_ebuild_environment(trees)
15680                 return action_regen(settings, portdb, myopts.get("--jobs"),
15681                         myopts.get("--load-average"))
15682         # CONFIG action
15683         elif "config"==myaction:
15684                 validate_ebuild_environment(trees)
15685                 action_config(settings, trees, myopts, myfiles)
15686
15687         # SEARCH action
15688         elif "search"==myaction:
15689                 validate_ebuild_environment(trees)
15690                 action_search(trees[settings["ROOT"]]["root_config"],
15691                         myopts, myfiles, spinner)
15692         elif myaction in ("clean", "unmerge") or \
15693                 (myaction == "prune" and "--nodeps" in myopts):
15694                 validate_ebuild_environment(trees)
15695
15696                 # Ensure atoms are valid before calling unmerge().
15697                 # For backward compat, leading '=' is not required.
15698                 for x in myfiles:
15699                         if is_valid_package_atom(x) or \
15700                                 is_valid_package_atom("=" + x):
15701                                 continue
15702                         msg = []
15703                         msg.append("'%s' is not a valid package atom." % (x,))
15704                         msg.append("Please check ebuild(5) for full details.")
15705                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15706                                 level=logging.ERROR, noiselevel=-1)
15707                         return 1
15708
15709                 # When given a list of atoms, unmerge
15710                 # them in the order given.
15711                 ordered = myaction == "unmerge"
15712                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15713                         mtimedb["ldpath"], ordered=ordered):
15714                         if not (buildpkgonly or fetchonly or pretend):
15715                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15716
15717         elif myaction in ("depclean", "info", "prune"):
15718
15719                 # Ensure atoms are valid before calling unmerge().
15720                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15721                 valid_atoms = []
15722                 for x in myfiles:
15723                         if is_valid_package_atom(x):
15724                                 try:
15725                                         valid_atoms.append(
15726                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15727                                 except portage.exception.AmbiguousPackageName, e:
15728                                         msg = "The short ebuild name \"" + x + \
15729                                                 "\" is ambiguous.  Please specify " + \
15730                                                 "one of the following " + \
15731                                                 "fully-qualified ebuild names instead:"
15732                                         for line in textwrap.wrap(msg, 70):
15733                                                 writemsg_level("!!! %s\n" % (line,),
15734                                                         level=logging.ERROR, noiselevel=-1)
15735                                         for i in e[0]:
15736                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15737                                                         level=logging.ERROR, noiselevel=-1)
15738                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15739                                         return 1
15740                                 continue
15741                         msg = []
15742                         msg.append("'%s' is not a valid package atom." % (x,))
15743                         msg.append("Please check ebuild(5) for full details.")
15744                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15745                                 level=logging.ERROR, noiselevel=-1)
15746                         return 1
15747
15748                 if myaction == "info":
15749                         return action_info(settings, trees, myopts, valid_atoms)
15750
15751                 validate_ebuild_environment(trees)
15752                 action_depclean(settings, trees, mtimedb["ldpath"],
15753                         myopts, myaction, valid_atoms, spinner)
15754                 if not (buildpkgonly or fetchonly or pretend):
15755                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15756         # "update", "system", or just process files:
15757         else:
15758                 validate_ebuild_environment(trees)
15759
15760                 for x in myfiles:
15761                         if x.startswith(SETPREFIX) or \
15762                                 is_valid_package_atom(x):
15763                                 continue
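                              # Accept arguments that are filesystem paths: absolute
                              # paths or names of existing files (e.g. ebuilds or
                              # binary packages given by path).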
15764                         if x[:1] == os.sep:
15765                                 continue
15766                         try:
15767                                 os.lstat(x)
15768                                 continue
15769                         except OSError:
15770                                 pass
15771                         msg = []
15772                         msg.append("'%s' is not a valid package atom." % (x,))
15773                         msg.append("Please check ebuild(5) for full details.")
15774                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15775                                 level=logging.ERROR, noiselevel=-1)
15776                         return 1
15777
15778                 if "--pretend" not in myopts:
15779                         display_news_notification(root_config, myopts)
15780                 retval = action_build(settings, trees, mtimedb,
15781                         myopts, myaction, myfiles, spinner)
15782                 root_config = trees[settings["ROOT"]]["root_config"]
15783                 post_emerge(root_config, myopts, mtimedb, retval)
15784
15785                 return retval