pym/_emerge/__init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
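                # A 0.05 second minimum latency means _return_early() below limits
                # terminal writes to roughly 20 updates per second.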
101
102         def _return_early(self):
103                 """
104                 Flushing output to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for user input;
148         the input is checked against the responses and the first match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
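# Illustrative usage (not part of the original module): with the defaults,
# userquery("Continue?") prints 'Continue? [Yes/No] ' and returns "Yes" for an
# empty or "y" response and "No" for "n"; EOF or Ctrl-C exits with status 1.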
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--rdeps-only",   "--root-deps",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
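# Illustrative expansion (derived from the table above): a clustered short
# option such as "-uDN" maps to --update --deep --newuse, and "-1" maps to
# --oneshot; shortmapping is presumably consulted by the option parser
# elsewhere in this module.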
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
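# Illustrative behaviour (derived from the code above): format_size(1) rounds
# up to "1 kB", format_size(2500000) yields "2,442 kB", and a string argument
# (an already-formatted size) is returned unchanged.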
295
296 def getgccversion(chost):
297         """
298         @rtype: C{str}
299         @return: the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
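# Fallback order used above: `gcc-config -c` (output must start with ${CHOST}-),
# then `${CHOST}-gcc -dumpversion`, then plain `gcc -dumpversion`; if all three
# fail, the not-found warning is printed and "[unavailable]" is returned.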
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of whether it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
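# Illustrative result (derived from the code above): for an update such as
# `emerge --update --deep world`, myopts contains "--update" and "--deep", so
# the returned set is set(["recurse", "selective", "deep"]); adding "--nodeps"
# would discard "recurse".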
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virtual matches are unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtual
497                 expansion can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
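        # Levels supported by _xmatch() above: "match-all" returns every cpv for
        # the atom's ${CATEGORY}/${PN} across all databases, "match-visible"
        # returns only the visible ones, and "bestmatch-visible" returns the
        # single highest visible cpv (or None); any other level raises
        # NotImplementedError.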
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 self.sets = self.setconfig.getSets()
775                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776
777 def create_world_atom(pkg, args_set, root_config):
778         """Create a new atom for the world file if one does not exist.  If the
779         argument atom is precise enough to identify a specific slot then a slot
780         atom will be returned. Atoms that are in the system set may also be stored
781         in world since system atoms can only match one slot while world atoms can
782         be greedy with respect to slots.  Unslotted system packages will not be
783         stored in world."""
784
785         arg_atom = args_set.findAtomForPackage(pkg)
786         if not arg_atom:
787                 return None
788         cp = portage.dep_getkey(arg_atom)
789         new_world_atom = cp
790         sets = root_config.sets
791         portdb = root_config.trees["porttree"].dbapi
792         vardb = root_config.trees["vartree"].dbapi
793         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
794                 for cpv in portdb.match(cp))
795         slotted = len(available_slots) > 1 or \
796                 (len(available_slots) == 1 and "0" not in available_slots)
797         if not slotted:
798                 # check the vdb in case this is multislot
799                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
800                         for cpv in vardb.match(cp))
801                 slotted = len(available_slots) > 1 or \
802                         (len(available_slots) == 1 and "0" not in available_slots)
803         if slotted and arg_atom != cp:
804                 # If the user gave a specific atom, store it as a
805                 # slot atom in the world file.
806                 slot_atom = pkg.slot_atom
807
808                 # For USE=multislot, there are a couple of cases to
809                 # handle here:
810                 #
811                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
812                 #    unknown value, so just record an unslotted atom.
813                 #
814                 # 2) SLOT comes from an installed package and there is no
815                 #    matching SLOT in the portage tree.
816                 #
817                 # Make sure that the slot atom is available in either the
818                 # portdb or the vardb, since otherwise the user certainly
819                 # doesn't want the SLOT atom recorded in the world file
820                 # (case 1 above).  If it's only available in the vardb,
821                 # the user may be trying to prevent a USE=multislot
822                 # package from being removed by --depclean (case 2 above).
823
824                 mydb = portdb
825                 if not portdb.match(slot_atom):
826                         # SLOT seems to come from an installed multislot package
827                         mydb = vardb
828                 # If there is no installed package matching the SLOT atom,
829                 # it probably changed SLOT spontaneously due to USE=multislot,
830                 # so just record an unslotted atom.
831                 if vardb.match(slot_atom):
832                         # Now verify that the argument is precise
833                         # enough to identify a specific slot.
834                         matches = mydb.match(arg_atom)
835                         matched_slots = set()
836                         for cpv in matches:
837                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
838                         if len(matched_slots) == 1:
839                                 new_world_atom = slot_atom
840
841         if new_world_atom == sets["world"].findAtomForPackage(pkg):
842                 # Both atoms would be identical, so there's nothing to add.
843                 return None
844         if not slotted:
845                 # Unlike world atoms, system atoms are not greedy for slots, so they
846                 # can't be safely excluded from world if they are slotted.
847                 system_atom = sets["system"].findAtomForPackage(pkg)
848                 if system_atom:
849                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
850                                 return None
851                         # System virtuals aren't safe to exclude from world since they can
852                         # match multiple old-style virtuals but only one of them will be
853                         # pulled in by update or depclean.
854                         providers = portdb.mysettings.getvirtuals().get(
855                                 portage.dep_getkey(system_atom))
856                         if providers and len(providers) == 1 and providers[0] == cp:
857                                 return None
858         return new_world_atom
859
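# Hypothetical example (package names are illustrative, not from this file):
# if the argument atom were "=dev-lang/python-2.5*" and dev-lang/python is
# slotted, the slot atom (pkg.slot_atom, e.g. "dev-lang/python:2.5") would be
# recorded, provided the argument matches exactly one slot; an unslotted
# package yields the plain "dev-lang/python", and None is returned when world
# already contains an equivalent atom.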
860 def filter_iuse_defaults(iuse):
861         for flag in iuse:
862                 if flag.startswith("+") or flag.startswith("-"):
863                         yield flag[1:]
864                 else:
865                         yield flag
866
867 class SlotObject(object):
868         __slots__ = ("__weakref__",)
869
870         def __init__(self, **kwargs):
871                 classes = [self.__class__]
872                 while classes:
873                         c = classes.pop()
874                         if c is SlotObject:
875                                 continue
876                         classes.extend(c.__bases__)
877                         slots = getattr(c, "__slots__", None)
878                         if not slots:
879                                 continue
880                         for myattr in slots:
881                                 myvalue = kwargs.get(myattr, None)
882                                 setattr(self, myattr, myvalue)
883
884         def copy(self):
885                 """
886                 Create a new instance and copy all attributes
887                 defined from __slots__ (including those from
888                 inherited classes).
889                 """
890                 obj = self.__class__()
891
892                 classes = [self.__class__]
893                 while classes:
894                         c = classes.pop()
895                         if c is SlotObject:
896                                 continue
897                         classes.extend(c.__bases__)
898                         slots = getattr(c, "__slots__", None)
899                         if not slots:
900                                 continue
901                         for myattr in slots:
902                                 setattr(obj, myattr, getattr(self, myattr))
903
904                 return obj
905
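# Usage sketch (hypothetical subclass, not defined in this module):
#       class _Example(SlotObject):
#               __slots__ = ("name", "value")
# accepts its slot names as keyword arguments, e.g. _Example(name="foo"),
# and any slot not passed (here "value") is initialized to None; copy()
# returns a new instance with every slot copied over.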
906 class AbstractDepPriority(SlotObject):
907         __slots__ = ("buildtime", "runtime", "runtime_post")
908
909         def __lt__(self, other):
910                 return self.__int__() < other
911
912         def __le__(self, other):
913                 return self.__int__() <= other
914
915         def __eq__(self, other):
916                 return self.__int__() == other
917
918         def __ne__(self, other):
919                 return self.__int__() != other
920
921         def __gt__(self, other):
922                 return self.__int__() > other
923
924         def __ge__(self, other):
925                 return self.__int__() >= other
926
927         def copy(self):
928                 import copy
929                 return copy.copy(self)
930
931 class DepPriority(AbstractDepPriority):
932
933         __slots__ = ("satisfied", "optional", "rebuild")
934
935         def __int__(self):
936                 return 0
937
938         def __str__(self):
939                 if self.optional:
940                         return "optional"
941                 if self.buildtime:
942                         return "buildtime"
943                 if self.runtime:
944                         return "runtime"
945                 if self.runtime_post:
946                         return "runtime_post"
947                 return "soft"
948
949 class BlockerDepPriority(DepPriority):
950         __slots__ = ()
951         def __int__(self):
952                 return 0
953
954         def __str__(self):
955                 return 'blocker'
956
957 BlockerDepPriority.instance = BlockerDepPriority()
958
959 class UnmergeDepPriority(AbstractDepPriority):
960         __slots__ = ("optional", "satisfied",)
961         """
962         Combination of properties           Priority  Category
963
964         runtime                                0       HARD
965         runtime_post                          -1       HARD
966         buildtime                             -2       SOFT
967         (none of the above)                   -2       SOFT
968         """
969
970         MAX    =  0
971         SOFT   = -2
972         MIN    = -2
973
974         def __int__(self):
975                 if self.runtime:
976                         return 0
977                 if self.runtime_post:
978                         return -1
979                 if self.buildtime:
980                         return -2
981                 return -2
982
983         def __str__(self):
984                 myvalue = self.__int__()
985                 if myvalue > self.SOFT:
986                         return "hard"
987                 return "soft"
988
989 class DepPriorityNormalRange(object):
990         """
991         DepPriority properties              Index      Category
992
993         buildtime                                      HARD
994         runtime                                3       MEDIUM
995         runtime_post                           2       MEDIUM_SOFT
996         optional                               1       SOFT
997         (none of the above)                    0       NONE
998         """
999         MEDIUM      = 3
1000         MEDIUM_SOFT = 2
1001         SOFT        = 1
1002         NONE        = 0
1003
1004         @classmethod
1005         def _ignore_optional(cls, priority):
1006                 if priority.__class__ is not DepPriority:
1007                         return False
1008                 return bool(priority.optional)
1009
1010         @classmethod
1011         def _ignore_runtime_post(cls, priority):
1012                 if priority.__class__ is not DepPriority:
1013                         return False
1014                 return bool(priority.optional or priority.runtime_post)
1015
1016         @classmethod
1017         def _ignore_runtime(cls, priority):
1018                 if priority.__class__ is not DepPriority:
1019                         return False
1020                 return not priority.buildtime
1021
1022         ignore_medium      = _ignore_runtime
1023         ignore_medium_soft = _ignore_runtime_post
1024         ignore_soft        = _ignore_optional
1025
1026 DepPriorityNormalRange.ignore_priority = (
1027         None,
1028         DepPriorityNormalRange._ignore_optional,
1029         DepPriorityNormalRange._ignore_runtime_post,
1030         DepPriorityNormalRange._ignore_runtime
1031 )
1032
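# The ignore_priority tuple above is ordered from "ignore nothing" (None) to
# progressively broader filters: index 1 ignores optional deps, index 2 also
# ignores runtime_post deps, and index 3 ignores everything except buildtime
# deps, roughly mirroring the index column in the class docstring.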
1033 class DepPrioritySatisfiedRange(object):
1034         """
1035         DepPriority                         Index      Category
1036
1037         not satisfied and buildtime                    HARD
1038         not satisfied and runtime              7       MEDIUM
1039         not satisfied and runtime_post         6       MEDIUM_SOFT
1040         satisfied and buildtime and rebuild    5       SOFT
1041         satisfied and buildtime                4       SOFT
1042         satisfied and runtime                  3       SOFT
1043         satisfied and runtime_post             2       SOFT
1044         optional                               1       SOFT
1045         (none of the above)                    0       NONE
1046         """
1047         MEDIUM      = 7
1048         MEDIUM_SOFT = 6
1049         SOFT        = 5
1050         NONE        = 0
1051
1052         @classmethod
1053         def _ignore_optional(cls, priority):
1054                 if priority.__class__ is not DepPriority:
1055                         return False
1056                 return bool(priority.optional)
1057
1058         @classmethod
1059         def _ignore_satisfied_runtime_post(cls, priority):
1060                 if priority.__class__ is not DepPriority:
1061                         return False
1062                 if priority.optional:
1063                         return True
1064                 if not priority.satisfied:
1065                         return False
1066                 return bool(priority.runtime_post)
1067
1068         @classmethod
1069         def _ignore_satisfied_runtime(cls, priority):
1070                 if priority.__class__ is not DepPriority:
1071                         return False
1072                 if priority.optional:
1073                         return True
1074                 if not priority.satisfied:
1075                         return False
1076                 return not priority.buildtime
1077
1078         @classmethod
1079         def _ignore_satisfied_buildtime(cls, priority):
1080                 if priority.__class__ is not DepPriority:
1081                         return False
1082                 if priority.optional:
1083                         return True
1084                 if not priority.satisfied:
1085                         return False
1086                 if priority.buildtime:
1087                         return not priority.rebuild
1088                 return True
1089
1090         @classmethod
1091         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1092                 if priority.__class__ is not DepPriority:
1093                         return False
1094                 if priority.optional:
1095                         return True
1096                 return bool(priority.satisfied)
1097
1098         @classmethod
1099         def _ignore_runtime_post(cls, priority):
1100                 if priority.__class__ is not DepPriority:
1101                         return False
1102                 return bool(priority.optional or \
1103                         priority.satisfied or \
1104                         priority.runtime_post)
1105
1106         @classmethod
1107         def _ignore_runtime(cls, priority):
1108                 if priority.__class__ is not DepPriority:
1109                         return False
1110                 return bool(priority.satisfied or \
1111                         not priority.buildtime)
1112
1113         ignore_medium      = _ignore_runtime
1114         ignore_medium_soft = _ignore_runtime_post
1115         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1116
1117 DepPrioritySatisfiedRange.ignore_priority = (
1118         None,
1119         DepPrioritySatisfiedRange._ignore_optional,
1120         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1121         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1122         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1123         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1124         DepPrioritySatisfiedRange._ignore_runtime_post,
1125         DepPrioritySatisfiedRange._ignore_runtime
1126 )
1127
1128 def _find_deep_system_runtime_deps(graph):
1129         deep_system_deps = set()
1130         node_stack = []
1131         for node in graph:
1132                 if not isinstance(node, Package) or \
1133                         node.operation == 'uninstall':
1134                         continue
1135                 if node.root_config.sets['system'].findAtomForPackage(node):
1136                         node_stack.append(node)
1137
1138         def ignore_priority(priority):
1139                 """
1140                 Ignore non-runtime priorities.
1141                 """
1142                 if isinstance(priority, DepPriority) and \
1143                         (priority.runtime or priority.runtime_post):
1144                         return False
1145                 return True
1146
1147         while node_stack:
1148                 node = node_stack.pop()
1149                 if node in deep_system_deps:
1150                         continue
1151                 deep_system_deps.add(node)
1152                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1153                         if not isinstance(child, Package) or \
1154                                 child.operation == 'uninstall':
1155                                 continue
1156                         node_stack.append(child)
1157
1158         return deep_system_deps
1159
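# _find_deep_system_runtime_deps() above seeds a stack with every Package in
# the graph that belongs to the 'system' set (uninstall operations are
# skipped), then follows child edges whose priority is runtime or
# runtime_post, returning the full set of packages reachable that way.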
1160 class FakeVartree(portage.vartree):
1161         """This implements an in-memory copy of a vartree instance that provides
1162         all the interfaces required for use by the depgraph.  The vardb is locked
1163         during the constructor call just long enough to read a copy of the
1164         installed package information.  This allows the depgraph to do its
1165         dependency calculations without holding a lock on the vardb.  It also
1166         allows things like vardb global updates to be done in memory so that the
1167         user doesn't necessarily need write access to the vardb in cases where
1168         global updates are necessary (updates are performed when necessary if there
1169         is not a matching ebuild in the tree)."""
1170         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1171                 self._root_config = root_config
1172                 if pkg_cache is None:
1173                         pkg_cache = {}
1174                 real_vartree = root_config.trees["vartree"]
1175                 portdb = root_config.trees["porttree"].dbapi
1176                 self.root = real_vartree.root
1177                 self.settings = real_vartree.settings
1178                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1179                 if "_mtime_" not in mykeys:
1180                         mykeys.append("_mtime_")
1181                 self._db_keys = mykeys
1182                 self._pkg_cache = pkg_cache
1183                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1184                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1185                 try:
1186                         # At least the parent needs to exist for the lock file.
1187                         portage.util.ensure_dirs(vdb_path)
1188                 except portage.exception.PortageException:
1189                         pass
1190                 vdb_lock = None
1191                 try:
1192                         if acquire_lock and os.access(vdb_path, os.W_OK):
1193                                 vdb_lock = portage.locks.lockdir(vdb_path)
1194                         real_dbapi = real_vartree.dbapi
1195                         slot_counters = {}
1196                         for cpv in real_dbapi.cpv_all():
1197                                 cache_key = ("installed", self.root, cpv, "nomerge")
1198                                 pkg = self._pkg_cache.get(cache_key)
1199                                 if pkg is not None:
1200                                         metadata = pkg.metadata
1201                                 else:
1202                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1203                                 myslot = metadata["SLOT"]
1204                                 mycp = portage.dep_getkey(cpv)
1205                                 myslot_atom = "%s:%s" % (mycp, myslot)
1206                                 try:
1207                                         mycounter = long(metadata["COUNTER"])
1208                                 except ValueError:
1209                                         mycounter = 0
1210                                         metadata["COUNTER"] = str(mycounter)
1211                                 other_counter = slot_counters.get(myslot_atom, None)
1212                                 if other_counter is not None:
1213                                         if other_counter > mycounter:
1214                                                 continue
1215                                 slot_counters[myslot_atom] = mycounter
1216                                 if pkg is None:
1217                                         pkg = Package(built=True, cpv=cpv,
1218                                                 installed=True, metadata=metadata,
1219                                                 root_config=root_config, type_name="installed")
1220                                 self._pkg_cache[pkg] = pkg
1221                                 self.dbapi.cpv_inject(pkg)
1222                         real_dbapi.flush_cache()
1223                 finally:
1224                         if vdb_lock:
1225                                 portage.locks.unlockdir(vdb_lock)
1226                 # Populate the old-style virtuals using the cached values.
1227                 if not self.settings.treeVirtuals:
1228                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1229                                 portage.getCPFromCPV, self.get_all_provides())
1230
1231         # Initialize variables needed for lazy cache pulls of the live ebuild
1232                 # metadata.  This ensures that the vardb lock is released ASAP, without
1233                 # being delayed in case cache generation is triggered.
1234                 self._aux_get = self.dbapi.aux_get
1235                 self.dbapi.aux_get = self._aux_get_wrapper
1236                 self._match = self.dbapi.match
1237                 self.dbapi.match = self._match_wrapper
1238                 self._aux_get_history = set()
1239                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1240                 self._portdb = portdb
1241                 self._global_updates = None
1242
1243         def _match_wrapper(self, cpv, use_cache=1):
1244                 """
1245                 Make sure the metadata in Package instances gets updated for any
1246                 cpv that is returned from a match() call, since the metadata can
1247                 be accessed directly from the Package instance instead of via
1248                 aux_get().
1249                 """
1250                 matches = self._match(cpv, use_cache=use_cache)
1251                 for cpv in matches:
1252                         if cpv in self._aux_get_history:
1253                                 continue
1254                         self._aux_get_wrapper(cpv, [])
1255                 return matches
1256
1257         def _aux_get_wrapper(self, pkg, wants):
1258                 if pkg in self._aux_get_history:
1259                         return self._aux_get(pkg, wants)
1260                 self._aux_get_history.add(pkg)
1261                 try:
1262                         # Use the live ebuild metadata if possible.
1263                         live_metadata = dict(izip(self._portdb_keys,
1264                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1265                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1266                                 raise KeyError(pkg)
1267                         self.dbapi.aux_update(pkg, live_metadata)
1268                 except (KeyError, portage.exception.PortageException):
1269                         if self._global_updates is None:
1270                                 self._global_updates = \
1271                                         grab_global_updates(self._portdb.porttree_root)
1272                         perform_global_updates(
1273                                 pkg, self.dbapi, self._global_updates)
1274                 return self._aux_get(pkg, wants)
1275
1276         def sync(self, acquire_lock=1):
1277                 """
1278                 Call this method to synchronize state with the real vardb
1279                 after one or more packages may have been installed or
1280                 uninstalled.
1281                 """
1282                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1283                 try:
1284                         # At least the parent needs to exist for the lock file.
1285                         portage.util.ensure_dirs(vdb_path)
1286                 except portage.exception.PortageException:
1287                         pass
1288                 vdb_lock = None
1289                 try:
1290                         if acquire_lock and os.access(vdb_path, os.W_OK):
1291                                 vdb_lock = portage.locks.lockdir(vdb_path)
1292                         self._sync()
1293                 finally:
1294                         if vdb_lock:
1295                                 portage.locks.unlockdir(vdb_lock)
1296
1297         def _sync(self):
1298
1299                 real_vardb = self._root_config.trees["vartree"].dbapi
1300                 current_cpv_set = frozenset(real_vardb.cpv_all())
1301                 pkg_vardb = self.dbapi
1302                 aux_get_history = self._aux_get_history
1303
1304                 # Remove any packages that have been uninstalled.
1305                 for pkg in list(pkg_vardb):
1306                         if pkg.cpv not in current_cpv_set:
1307                                 pkg_vardb.cpv_remove(pkg)
1308                                 aux_get_history.discard(pkg.cpv)
1309
1310                 # Validate counters and timestamps.
1311                 slot_counters = {}
1312                 root = self.root
1313                 validation_keys = ["COUNTER", "_mtime_"]
1314                 for cpv in current_cpv_set:
1315
1316                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1317                         pkg = pkg_vardb.get(pkg_hash_key)
1318                         if pkg is not None:
1319                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1320                                 try:
1321                                         counter = long(counter)
1322                                 except ValueError:
1323                                         counter = 0
1324
1325                                 if counter != pkg.counter or \
1326                                         mtime != pkg.mtime:
1327                                         pkg_vardb.cpv_remove(pkg)
1328                                         aux_get_history.discard(pkg.cpv)
1329                                         pkg = None
1330
1331                         if pkg is None:
1332                                 pkg = self._pkg(cpv)
1333
1334                         other_counter = slot_counters.get(pkg.slot_atom)
1335                         if other_counter is not None:
1336                                 if other_counter > pkg.counter:
1337                                         continue
1338
1339                         slot_counters[pkg.slot_atom] = pkg.counter
1340                         pkg_vardb.cpv_inject(pkg)
1341
1342                 real_vardb.flush_cache()
1343
1344         def _pkg(self, cpv):
1345                 root_config = self._root_config
1346                 real_vardb = root_config.trees["vartree"].dbapi
1347                 pkg = Package(cpv=cpv, installed=True,
1348                         metadata=izip(self._db_keys,
1349                         real_vardb.aux_get(cpv, self._db_keys)),
1350                         root_config=root_config,
1351                         type_name="installed")
1352
1353                 try:
1354                         mycounter = long(pkg.metadata["COUNTER"])
1355                 except ValueError:
1356                         mycounter = 0
1357                         pkg.metadata["COUNTER"] = str(mycounter)
1358
1359                 return pkg
1360
1361 def grab_global_updates(portdir):
1362         from portage.update import grab_updates, parse_updates
1363         updpath = os.path.join(portdir, "profiles", "updates")
1364         try:
1365                 rawupdates = grab_updates(updpath)
1366         except portage.exception.DirectoryNotFound:
1367                 rawupdates = []
1368         upd_commands = []
1369         for mykey, mystat, mycontent in rawupdates:
1370                 commands, errors = parse_updates(mycontent)
1371                 upd_commands.extend(commands)
1372         return upd_commands
1373
1374 def perform_global_updates(mycpv, mydb, mycommands):
1375         from portage.update import update_dbentries
1376         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1377         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1378         updates = update_dbentries(mycommands, aux_dict)
1379         if updates:
1380                 mydb.aux_update(mycpv, updates)
1381
1382 def visible(pkgsettings, pkg):
1383         """
1384         Check if a package is visible. This can raise an InvalidDependString
1385         exception if LICENSE is invalid.
1386         TODO: optionally generate a list of masking reasons
1387         @rtype: Boolean
1388         @returns: True if the package is visible, False otherwise.
1389         """
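        # Illustrative call (hedged sketch; "pkgsettings" is the config for the
        # package's root and "pkg" is a Package instance):
        #
        #   if not visible(pkgsettings, pkg):
        #       # the candidate is masked for this configuration
        #       continue
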
1390         if not pkg.metadata["SLOT"]:
1391                 return False
1392         if not pkg.installed:
1393                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1394                         return False
1395         eapi = pkg.metadata["EAPI"]
1396         if not portage.eapi_is_supported(eapi):
1397                 return False
1398         if not pkg.installed:
1399                 if portage._eapi_is_deprecated(eapi):
1400                         return False
1401                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1402                         return False
1403         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         try:
1408                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1409                         return False
1410         except portage.exception.InvalidDependString:
1411                 return False
1412         return True
1413
1414 def get_masking_status(pkg, pkgsettings, root_config):
1415
1416         mreasons = portage.getmaskingstatus(
1417                 pkg, settings=pkgsettings,
1418                 portdb=root_config.trees["porttree"].dbapi)
1419
1420         if not pkg.installed:
1421                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1422                         mreasons.append("CHOST: %s" % \
1423                                 pkg.metadata["CHOST"])
1424
1425         if not pkg.metadata["SLOT"]:
1426                 mreasons.append("invalid: SLOT is undefined")
1427
1428         return mreasons
1429
1430 def get_mask_info(root_config, cpv, pkgsettings,
1431         db, pkg_type, built, installed, db_keys):
1432         eapi_masked = False
1433         try:
1434                 metadata = dict(izip(db_keys,
1435                         db.aux_get(cpv, db_keys)))
1436         except KeyError:
1437                 metadata = None
1438         if metadata and not built:
1439                 pkgsettings.setcpv(cpv, mydb=metadata)
1440                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1441                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1442         if metadata is None:
1443                 mreasons = ["corruption"]
1444         else:
1445                 eapi = metadata['EAPI']
1446                 if eapi[:1] == '-':
1447                         eapi = eapi[1:]
1448                 if not portage.eapi_is_supported(eapi):
1449                         mreasons = ['EAPI %s' % eapi]
1450                 else:
1451                         pkg = Package(type_name=pkg_type, root_config=root_config,
1452                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1453                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1454         return metadata, mreasons
1455
1456 def show_masked_packages(masked_packages):
1457         shown_licenses = set()
1458         shown_comments = set()
1459         # Maybe there is both an ebuild and a binary. Only
1460         # show one of them to avoid redundant output.
1461         shown_cpvs = set()
1462         have_eapi_mask = False
1463         for (root_config, pkgsettings, cpv,
1464                 metadata, mreasons) in masked_packages:
1465                 if cpv in shown_cpvs:
1466                         continue
1467                 shown_cpvs.add(cpv)
1468                 comment, filename = None, None
1469                 if "package.mask" in mreasons:
1470                         comment, filename = \
1471                                 portage.getmaskingreason(
1472                                 cpv, metadata=metadata,
1473                                 settings=pkgsettings,
1474                                 portdb=root_config.trees["porttree"].dbapi,
1475                                 return_location=True)
1476                 missing_licenses = []
1477                 if metadata:
1478                         if not portage.eapi_is_supported(metadata["EAPI"]):
1479                                 have_eapi_mask = True
1480                         try:
1481                                 missing_licenses = \
1482                                         pkgsettings._getMissingLicenses(
1483                                                 cpv, metadata)
1484                         except portage.exception.InvalidDependString:
1485                                 # This will have already been reported
1486                                 # above via mreasons.
1487                                 pass
1488
1489                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1490                 if comment and comment not in shown_comments:
1491                         print filename+":"
1492                         print comment
1493                         shown_comments.add(comment)
1494                 portdb = root_config.trees["porttree"].dbapi
1495                 for l in missing_licenses:
1496                         l_path = portdb.findLicensePath(l)
1497                         if l in shown_licenses:
1498                                 continue
1499                         msg = ("A copy of the '%s' license" + \
1500                         " is located at '%s'.") % (l, l_path)
1501                         print msg
1502                         print
1503                         shown_licenses.add(l)
1504         return have_eapi_mask
1505
1506 class Task(SlotObject):
1507         __slots__ = ("_hash_key", "_hash_value")
1508
1509         def _get_hash_key(self):
1510                 hash_key = getattr(self, "_hash_key", None)
1511                 if hash_key is None:
1512                         raise NotImplementedError(self)
1513                 return hash_key
1514
1515         def __eq__(self, other):
1516                 return self._get_hash_key() == other
1517
1518         def __ne__(self, other):
1519                 return self._get_hash_key() != other
1520
1521         def __hash__(self):
1522                 hash_value = getattr(self, "_hash_value", None)
1523                 if hash_value is None:
1524                         self._hash_value = hash(self._get_hash_key())
1525                 return self._hash_value
1526
1527         def __len__(self):
1528                 return len(self._get_hash_key())
1529
1530         def __getitem__(self, key):
1531                 return self._get_hash_key()[key]
1532
1533         def __iter__(self):
1534                 return iter(self._get_hash_key())
1535
1536         def __contains__(self, key):
1537                 return key in self._get_hash_key()
1538
1539         def __str__(self):
1540                 return str(self._get_hash_key())
1541
1542 class Blocker(Task):
1543
1544         __hash__ = Task.__hash__
1545         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1546
1547         def __init__(self, **kwargs):
1548                 Task.__init__(self, **kwargs)
1549                 self.cp = portage.dep_getkey(self.atom)
1550
1551         def _get_hash_key(self):
1552                 hash_key = getattr(self, "_hash_key", None)
1553                 if hash_key is None:
1554                         self._hash_key = \
1555                                 ("blocks", self.root, self.atom, self.eapi)
1556                 return self._hash_key
1557
1558 class Package(Task):
1559
1560         __hash__ = Task.__hash__
1561         __slots__ = ("built", "cpv", "depth",
1562                 "installed", "metadata", "onlydeps", "operation",
1563                 "root_config", "type_name",
1564                 "category", "counter", "cp", "cpv_split",
1565                 "inherited", "iuse", "mtime",
1566                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1567
1568         metadata_keys = [
1569                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1570                 "INHERITED", "IUSE", "KEYWORDS",
1571                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1572                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1573
1574         def __init__(self, **kwargs):
1575                 Task.__init__(self, **kwargs)
1576                 self.root = self.root_config.root
1577                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1578                 self.cp = portage.cpv_getkey(self.cpv)
1579                 slot = self.slot
1580                 if not slot:
1581                         # Avoid an InvalidAtom exception when creating slot_atom.
1582                         # This package instance will be masked due to empty SLOT.
1583                         slot = '0'
1584                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1585                 self.category, self.pf = portage.catsplit(self.cpv)
1586                 self.cpv_split = portage.catpkgsplit(self.cpv)
1587                 self.pv_split = self.cpv_split[1:]
1588
1589         class _use(object):
1590
1591                 __slots__ = ("__weakref__", "enabled")
1592
1593                 def __init__(self, use):
1594                         self.enabled = frozenset(use)
1595
1596         class _iuse(object):
1597
1598                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1599
1600                 def __init__(self, tokens, iuse_implicit):
1601                         self.tokens = tuple(tokens)
1602                         self.iuse_implicit = iuse_implicit
1603                         enabled = []
1604                         disabled = []
1605                         other = []
1606                         for x in tokens:
1607                                 prefix = x[:1]
1608                                 if prefix == "+":
1609                                         enabled.append(x[1:])
1610                                 elif prefix == "-":
1611                                         disabled.append(x[1:])
1612                                 else:
1613                                         other.append(x)
1614                         self.enabled = frozenset(enabled)
1615                         self.disabled = frozenset(disabled)
1616                         self.all = frozenset(chain(enabled, disabled, other))
1617
1618                 def __getattribute__(self, name):
1619                         if name == "regex":
1620                                 try:
1621                                         return object.__getattribute__(self, "regex")
1622                                 except AttributeError:
1623                                         all = object.__getattribute__(self, "all")
1624                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1625                                         # Escape anything except ".*" which is supposed
1626                                         # to pass through from _get_implicit_iuse()
1627                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1628                                         regex = "^(%s)$" % "|".join(regex)
1629                                         regex = regex.replace("\\.\\*", ".*")
1630                                         self.regex = re.compile(regex)
1631                         return object.__getattribute__(self, name)
1632
1633         def _get_hash_key(self):
1634                 hash_key = getattr(self, "_hash_key", None)
1635                 if hash_key is None:
1636                         if self.operation is None:
1637                                 self.operation = "merge"
1638                                 if self.onlydeps or self.installed:
1639                                         self.operation = "nomerge"
1640                         self._hash_key = \
1641                                 (self.type_name, self.root, self.cpv, self.operation)
1642                 return self._hash_key
1643
1644         def __lt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1648                         return True
1649                 return False
1650
1651         def __le__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1655                         return True
1656                 return False
1657
1658         def __gt__(self, other):
1659                 if other.cp != self.cp:
1660                         return False
1661                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1662                         return True
1663                 return False
1664
1665         def __ge__(self, other):
1666                 if other.cp != self.cp:
1667                         return False
1668                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1669                         return True
1670                 return False
1671
1672 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1673         if not x.startswith("UNUSED_"))
1674 _all_metadata_keys.discard("CDEPEND")
1675 _all_metadata_keys.update(Package.metadata_keys)
1676
1677 from portage.cache.mappings import slot_dict_class
1678 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1679
1680 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1681         """
1682         Detect metadata updates and synchronize Package attributes.
1683         """
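
        # For example (hedged sketch), assigning to a wrapped key keeps the
        # owning Package instance in sync:
        #
        #   pkg.metadata["USE"] = "ssl -doc"   # also refreshes pkg.use.enabled
        #   pkg.metadata["COUNTER"] = "42"     # also sets pkg.counter = 42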
1684
1685         __slots__ = ("_pkg",)
1686         _wrapped_keys = frozenset(
1687                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1688
1689         def __init__(self, pkg, metadata):
1690                 _PackageMetadataWrapperBase.__init__(self)
1691                 self._pkg = pkg
1692                 self.update(metadata)
1693
1694         def __setitem__(self, k, v):
1695                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1696                 if k in self._wrapped_keys:
1697                         getattr(self, "_set_" + k.lower())(k, v)
1698
1699         def _set_inherited(self, k, v):
1700                 if isinstance(v, basestring):
1701                         v = frozenset(v.split())
1702                 self._pkg.inherited = v
1703
1704         def _set_iuse(self, k, v):
1705                 self._pkg.iuse = self._pkg._iuse(
1706                         v.split(), self._pkg.root_config.iuse_implicit)
1707
1708         def _set_slot(self, k, v):
1709                 self._pkg.slot = v
1710
1711         def _set_use(self, k, v):
1712                 self._pkg.use = self._pkg._use(v.split())
1713
1714         def _set_counter(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.counter = v
1721
1722         def _set__mtime_(self, k, v):
1723                 if isinstance(v, basestring):
1724                         try:
1725                                 v = long(v.strip())
1726                         except ValueError:
1727                                 v = 0
1728                 self._pkg.mtime = v
1729
1730 class EbuildFetchonly(SlotObject):
1731
1732         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1733
1734         def execute(self):
1735                 settings = self.settings
1736                 pkg = self.pkg
1737                 portdb = pkg.root_config.trees["porttree"].dbapi
1738                 ebuild_path = portdb.findname(pkg.cpv)
1739                 settings.setcpv(pkg)
1740                 debug = settings.get("PORTAGE_DEBUG") == "1"
1741                 use_cache = 1 # always true
1742                 portage.doebuild_environment(ebuild_path, "fetch",
1743                         settings["ROOT"], settings, debug, use_cache, portdb)
1744                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1745
1746                 if restrict_fetch:
1747                         rval = self._execute_with_builddir()
1748                 else:
1749                         rval = portage.doebuild(ebuild_path, "fetch",
1750                                 settings["ROOT"], settings, debug=debug,
1751                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1752                                 mydbapi=portdb, tree="porttree")
1753
1754                         if rval != os.EX_OK:
1755                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1756                                 eerror(msg, phase="unpack", key=pkg.cpv)
1757
1758                 return rval
1759
1760         def _execute_with_builddir(self):
1761                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR for
1762                 # ensuring sane $PWD (bug #239560) and storing elog
1763                 # messages. Use a private temp directory, in order
1764                 # to avoid locking the main one.
1765                 settings = self.settings
1766                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1767                 from tempfile import mkdtemp
1768                 try:
1769                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1770                 except OSError, e:
1771                         if e.errno != portage.exception.PermissionDenied.errno:
1772                                 raise
1773                         raise portage.exception.PermissionDenied(global_tmpdir)
1774                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1775                 settings.backup_changes("PORTAGE_TMPDIR")
1776                 try:
1777                         retval = self._execute()
1778                 finally:
1779                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1780                         settings.backup_changes("PORTAGE_TMPDIR")
1781                         shutil.rmtree(private_tmpdir)
1782                 return retval
1783
1784         def _execute(self):
1785                 settings = self.settings
1786                 pkg = self.pkg
1787                 root_config = pkg.root_config
1788                 portdb = root_config.trees["porttree"].dbapi
1789                 ebuild_path = portdb.findname(pkg.cpv)
1790                 debug = settings.get("PORTAGE_DEBUG") == "1"
1791                 retval = portage.doebuild(ebuild_path, "fetch",
1792                         self.settings["ROOT"], self.settings, debug=debug,
1793                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1794                         mydbapi=portdb, tree="porttree")
1795
1796                 if retval != os.EX_OK:
1797                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1798                         eerror(msg, phase="unpack", key=pkg.cpv)
1799
1800                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1801                 return retval
1802
1803 class PollConstants(object):
1804
1805         """
1806         Provides POLL* constants that are equivalent to those from the
1807         select module, for use by PollSelectAdapter.
1808         """
1809
1810         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1811         v = 1
1812         for k in names:
1813                 locals()[k] = getattr(select, k, v)
1814                 v *= 2
1815         del k, v
1816
1817 class AsynchronousTask(SlotObject):
1818         """
1819         Subclasses override _wait() and _poll() so that calls
1820         to public methods can be wrapped for implementing
1821         hooks such as exit listener notification.
1822
1823         Subclasses should call self.wait() to notify exit listeners after
1824         the task is complete and self.returncode has been set.
1825         """
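
        # Minimal subclass sketch (hedged, illustration only):
        #
        #   class NoopTask(AsynchronousTask):
        #       def _start(self):
        #           # do (or schedule) the real work, then record the result
        #           self.returncode = os.EX_OK
        #           self.wait()  # notifies any registered exit listeners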
1826
1827         __slots__ = ("background", "cancelled", "returncode") + \
1828                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1829
1830         def start(self):
1831                 """
1832                 Start an asynchronous task and then return as soon as possible.
1833                 """
1834                 self._start_hook()
1835                 self._start()
1836
1837         def _start(self):
1838                 raise NotImplementedError(self)
1839
1840         def isAlive(self):
1841                 return self.returncode is None
1842
1843         def poll(self):
1844                 self._wait_hook()
1845                 return self._poll()
1846
1847         def _poll(self):
1848                 return self.returncode
1849
1850         def wait(self):
1851                 if self.returncode is None:
1852                         self._wait()
1853                 self._wait_hook()
1854                 return self.returncode
1855
1856         def _wait(self):
1857                 return self.returncode
1858
1859         def cancel(self):
1860                 self.cancelled = True
1861                 self.wait()
1862
1863         def addStartListener(self, f):
1864                 """
1865                 The function will be called with one argument, a reference to self.
1866                 """
1867                 if self._start_listeners is None:
1868                         self._start_listeners = []
1869                 self._start_listeners.append(f)
1870
1871         def removeStartListener(self, f):
1872                 if self._start_listeners is None:
1873                         return
1874                 self._start_listeners.remove(f)
1875
1876         def _start_hook(self):
1877                 if self._start_listeners is not None:
1878                         start_listeners = self._start_listeners
1879                         self._start_listeners = None
1880
1881                         for f in start_listeners:
1882                                 f(self)
1883
1884         def addExitListener(self, f):
1885                 """
1886                 The function will be called with one argument, a reference to self.
1887                 """
1888                 if self._exit_listeners is None:
1889                         self._exit_listeners = []
1890                 self._exit_listeners.append(f)
1891
1892         def removeExitListener(self, f):
1893                 if self._exit_listeners is None:
1894                         if self._exit_listener_stack is not None:
1895                                 self._exit_listener_stack.remove(f)
1896                         return
1897                 self._exit_listeners.remove(f)
1898
1899         def _wait_hook(self):
1900                 """
1901                 Call this method after the task completes, just before returning
1902                 the returncode from wait() or poll(). This hook is
1903                 used to trigger exit listeners when the returncode first
1904                 becomes available.
1905                 """
1906                 if self.returncode is not None and \
1907                         self._exit_listeners is not None:
1908
1909                         # This prevents recursion, in case one of the
1910                         # exit handlers triggers this method again by
1911                         # calling wait(). Use a stack that gives
1912                         # removeExitListener() an opportunity to consume
1913                         # listeners from the stack, before they can get
1914                         # called below. This is necessary because a call
1915                         # to one exit listener may result in a call to
1916                         # removeExitListener() for another listener on
1917                         # the stack. That listener needs to be removed
1918                         # from the stack since it would be inconsistent
1919                         to call it after it has been passed into
1920                         # removeExitListener().
1921                         self._exit_listener_stack = self._exit_listeners
1922                         self._exit_listeners = None
1923
1924                         self._exit_listener_stack.reverse()
1925                         while self._exit_listener_stack:
1926                                 self._exit_listener_stack.pop()(self)
1927
1928 class AbstractPollTask(AsynchronousTask):
1929
1930         __slots__ = ("scheduler",) + \
1931                 ("_registered",)
1932
1933         _bufsize = 4096
1934         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1935         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1936                 _exceptional_events
1937
1938         def _unregister(self):
1939                 raise NotImplementedError(self)
1940
1941         def _unregister_if_appropriate(self, event):
1942                 if self._registered:
1943                         if event & self._exceptional_events:
1944                                 self._unregister()
1945                                 self.cancel()
1946                         elif event & PollConstants.POLLHUP:
1947                                 self._unregister()
1948                                 self.wait()
1949
1950 class PipeReader(AbstractPollTask):
1951
1952         """
1953         Reads output from one or more files and saves it in memory,
1954         for retrieval via the getvalue() method. This is driven by
1955         the scheduler's poll() loop, so it runs entirely within the
1956         current process.
1957         """
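
        # Hedged usage sketch ("scheduler" is assumed to expose the register(),
        # schedule() and unregister() methods used elsewhere in this module;
        # the "pipe_read" key name is arbitrary):
        #
        #   master_fd, slave_fd = os.pipe()
        #   reader = PipeReader(
        #       input_files={"pipe_read": os.fdopen(master_fd, 'rb')},
        #       scheduler=scheduler)
        #   reader.start()
        #   # ... a child process writes to slave_fd and then closes it ...
        #   reader.wait()
        #   output = reader.getvalue()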
1958
1959         __slots__ = ("input_files",) + \
1960                 ("_read_data", "_reg_ids")
1961
1962         def _start(self):
1963                 self._reg_ids = set()
1964                 self._read_data = []
1965                 for k, f in self.input_files.iteritems():
1966                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1967                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1968                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1969                                 self._registered_events, self._output_handler))
1970                 self._registered = True
1971
1972         def isAlive(self):
1973                 return self._registered
1974
1975         def cancel(self):
1976                 if self.returncode is None:
1977                         self.returncode = 1
1978                         self.cancelled = True
1979                 self.wait()
1980
1981         def _wait(self):
1982                 if self.returncode is not None:
1983                         return self.returncode
1984
1985                 if self._registered:
1986                         self.scheduler.schedule(self._reg_ids)
1987                         self._unregister()
1988
1989                 self.returncode = os.EX_OK
1990                 return self.returncode
1991
1992         def getvalue(self):
1993                 """Retrieve the entire contents"""
1994                 if sys.hexversion >= 0x3000000:
1995                         return bytes().join(self._read_data)
1996                 return "".join(self._read_data)
1997
1998         def close(self):
1999                 """Free the memory buffer."""
2000                 self._read_data = None
2001
2002         def _output_handler(self, fd, event):
2003
2004                 if event & PollConstants.POLLIN:
2005
2006                         for f in self.input_files.itervalues():
2007                                 if fd == f.fileno():
2008                                         break
2009
2010                         buf = array.array('B')
2011                         try:
2012                                 buf.fromfile(f, self._bufsize)
2013                         except EOFError:
2014                                 pass
2015
2016                         if buf:
2017                                 self._read_data.append(buf.tostring())
2018                         else:
2019                                 self._unregister()
2020                                 self.wait()
2021
2022                 self._unregister_if_appropriate(event)
2023                 return self._registered
2024
2025         def _unregister(self):
2026                 """
2027                 Unregister from the scheduler and close open files.
2028                 """
2029
2030                 self._registered = False
2031
2032                 if self._reg_ids is not None:
2033                         for reg_id in self._reg_ids:
2034                                 self.scheduler.unregister(reg_id)
2035                         self._reg_ids = None
2036
2037                 if self.input_files is not None:
2038                         for f in self.input_files.itervalues():
2039                                 f.close()
2040                         self.input_files = None
2041
2042 class CompositeTask(AsynchronousTask):
2043
2044         __slots__ = ("scheduler",) + ("_current_task",)
2045
2046         def isAlive(self):
2047                 return self._current_task is not None
2048
2049         def cancel(self):
2050                 self.cancelled = True
2051                 if self._current_task is not None:
2052                         self._current_task.cancel()
2053
2054         def _poll(self):
2055                 """
2056                 This does a loop calling self._current_task.poll()
2057                 repeatedly as long as the value of self._current_task
2058                 keeps changing. It calls poll() a maximum of one time
2059                 for a given self._current_task instance. This is useful
2060                 since calling poll() on a task can trigger advance to
2061                 the next task, which could eventually lead to the returncode
2062                 being set in cases where polling only a single task would
2063                 not have the same effect.
2064                 """
2065
2066                 prev = None
2067                 while True:
2068                         task = self._current_task
2069                         if task is None or task is prev:
2070                                 # don't poll the same task more than once
2071                                 break
2072                         task.poll()
2073                         prev = task
2074
2075                 return self.returncode
2076
2077         def _wait(self):
2078
2079                 prev = None
2080                 while True:
2081                         task = self._current_task
2082                         if task is None:
2083                                 # don't wait for the same task more than once
2084                                 break
2085                         if task is prev:
2086                                 # Before the task.wait() method returned, an exit
2087                                 # listener should have set self._current_task to either
2088                                 # a different task or None. Something is wrong.
2089                                 raise AssertionError("self._current_task has not " + \
2090                                         "changed since calling wait", self, task)
2091                         task.wait()
2092                         prev = task
2093
2094                 return self.returncode
2095
2096         def _assert_current(self, task):
2097                 """
2098                 Raises an AssertionError if the given task is not the
2099                 same one as self._current_task. This can be useful
2100                 for detecting bugs.
2101                 """
2102                 if task is not self._current_task:
2103                         raise AssertionError("Unrecognized task: %s" % (task,))
2104
2105         def _default_exit(self, task):
2106                 """
2107                 Calls _assert_current() on the given task and then sets the
2108                 composite returncode attribute if task.returncode != os.EX_OK.
2109                 If the task failed then self._current_task will be set to None.
2110                 Subclasses can use this as a generic task exit callback.
2111
2112                 @rtype: int
2113                 @returns: The task.returncode attribute.
2114                 """
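                # Typical exit-handler pattern in a subclass (hedged sketch;
                # "fetcher" and "next_task" are hypothetical):
                #
                #   def _fetch_exit(self, fetcher):
                #       if self._default_exit(fetcher) != os.EX_OK:
                #           self.wait()
                #           return
                #       self._start_task(next_task, self._default_final_exit)
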
2115                 self._assert_current(task)
2116                 if task.returncode != os.EX_OK:
2117                         self.returncode = task.returncode
2118                         self._current_task = None
2119                 return task.returncode
2120
2121         def _final_exit(self, task):
2122                 """
2123                 Assumes that task is the final task of this composite task.
2124                 Calls _default_exit() and sets self.returncode to the task's
2125                 returncode and sets self._current_task to None.
2126                 """
2127                 self._default_exit(task)
2128                 self._current_task = None
2129                 self.returncode = task.returncode
2130                 return self.returncode
2131
2132         def _default_final_exit(self, task):
2133                 """
2134                 This calls _final_exit() and then wait().
2135
2136                 Subclasses can use this as a generic final task exit callback.
2137
2138                 """
2139                 self._final_exit(task)
2140                 return self.wait()
2141
2142         def _start_task(self, task, exit_handler):
2143                 """
2144                 Register exit handler for the given task, set it
2145                 as self._current_task, and call task.start().
2146
2147                 Subclasses can use this as a generic way to start
2148                 a task.
2149
2150                 """
2151                 task.addExitListener(exit_handler)
2152                 self._current_task = task
2153                 task.start()
2154
2155 class TaskSequence(CompositeTask):
2156         """
2157         A collection of tasks that executes sequentially. Each task
2158         must have an addExitListener() method that can be used as
2159         a means to trigger movement from one task to the next.
2160         """
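
        # Hedged usage sketch:
        #
        #   seq = TaskSequence(scheduler=scheduler)
        #   seq.add(first_task)
        #   seq.add(second_task)
        #   seq.start()  # second_task starts only if first_task exits with os.EX_OK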
2161
2162         __slots__ = ("_task_queue",)
2163
2164         def __init__(self, **kwargs):
2165                 AsynchronousTask.__init__(self, **kwargs)
2166                 self._task_queue = deque()
2167
2168         def add(self, task):
2169                 self._task_queue.append(task)
2170
2171         def _start(self):
2172                 self._start_next_task()
2173
2174         def cancel(self):
2175                 self._task_queue.clear()
2176                 CompositeTask.cancel(self)
2177
2178         def _start_next_task(self):
2179                 self._start_task(self._task_queue.popleft(),
2180                         self._task_exit_handler)
2181
2182         def _task_exit_handler(self, task):
2183                 if self._default_exit(task) != os.EX_OK:
2184                         self.wait()
2185                 elif self._task_queue:
2186                         self._start_next_task()
2187                 else:
2188                         self._final_exit(task)
2189                         self.wait()
2190
2191 class SubProcess(AbstractPollTask):
2192
2193         __slots__ = ("pid",) + \
2194                 ("_files", "_reg_id")
2195
2196         # A file descriptor is required for the scheduler to monitor changes from
2197         # inside a poll() loop. When logging is not enabled, create a pipe just to
2198         # serve this purpose alone.
2199         _dummy_pipe_fd = 9
2200
2201         def _poll(self):
2202                 if self.returncode is not None:
2203                         return self.returncode
2204                 if self.pid is None:
2205                         return self.returncode
2206                 if self._registered:
2207                         return self.returncode
2208
2209                 try:
2210                         retval = os.waitpid(self.pid, os.WNOHANG)
2211                 except OSError, e:
2212                         if e.errno != errno.ECHILD:
2213                                 raise
2214                         del e
2215                         retval = (self.pid, 1)
2216
2217                 if retval == (0, 0):
2218                         return None
2219                 self._set_returncode(retval)
2220                 return self.returncode
2221
2222         def cancel(self):
2223                 if self.isAlive():
2224                         try:
2225                                 os.kill(self.pid, signal.SIGTERM)
2226                         except OSError, e:
2227                                 if e.errno != errno.ESRCH:
2228                                         raise
2229                                 del e
2230
2231                 self.cancelled = True
2232                 if self.pid is not None:
2233                         self.wait()
2234                 return self.returncode
2235
2236         def isAlive(self):
2237                 return self.pid is not None and \
2238                         self.returncode is None
2239
2240         def _wait(self):
2241
2242                 if self.returncode is not None:
2243                         return self.returncode
2244
2245                 if self._registered:
2246                         self.scheduler.schedule(self._reg_id)
2247                         self._unregister()
2248                         if self.returncode is not None:
2249                                 return self.returncode
2250
2251                 try:
2252                         wait_retval = os.waitpid(self.pid, 0)
2253                 except OSError, e:
2254                         if e.errno != errno.ECHILD:
2255                                 raise
2256                         del e
2257                         self._set_returncode((self.pid, 1))
2258                 else:
2259                         self._set_returncode(wait_retval)
2260
2261                 return self.returncode
2262
2263         def _unregister(self):
2264                 """
2265                 Unregister from the scheduler and close open files.
2266                 """
2267
2268                 self._registered = False
2269
2270                 if self._reg_id is not None:
2271                         self.scheduler.unregister(self._reg_id)
2272                         self._reg_id = None
2273
2274                 if self._files is not None:
2275                         for f in self._files.itervalues():
2276                                 f.close()
2277                         self._files = None
2278
2279         def _set_returncode(self, wait_retval):
2280
2281                 retval = wait_retval[1]
2282
2283                 if retval != os.EX_OK:
2284                         if retval & 0xff:
2285                                 retval = (retval & 0xff) << 8
2286                         else:
2287                                 retval = retval >> 8
2288
2289                 self.returncode = retval
2290
2291 class SpawnProcess(SubProcess):
2292
2293         """
2294         Constructor keyword args are passed into portage.process.spawn().
2295         The required "args" keyword argument will be passed as the first
2296         spawn() argument.
2297         """
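
        # Hedged usage sketch (argument values are illustrative):
        #
        #   proc = SpawnProcess(args=["/bin/true"], env=os.environ.copy(),
        #       scheduler=scheduler, logfile="/var/log/emerge-example.log")
        #   proc.start()
        #   if proc.wait() != os.EX_OK:
        #       writemsg("spawn failed\n", noiselevel=-1)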
2298
2299         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2300                 "uid", "gid", "groups", "umask", "logfile",
2301                 "path_lookup", "pre_exec")
2302
2303         __slots__ = ("args",) + \
2304                 _spawn_kwarg_names
2305
2306         _file_names = ("log", "process", "stdout")
2307         _files_dict = slot_dict_class(_file_names, prefix="")
2308
2309         def _start(self):
2310
2311                 if self.cancelled:
2312                         return
2313
2314                 if self.fd_pipes is None:
2315                         self.fd_pipes = {}
2316                 fd_pipes = self.fd_pipes
2317                 fd_pipes.setdefault(0, sys.stdin.fileno())
2318                 fd_pipes.setdefault(1, sys.stdout.fileno())
2319                 fd_pipes.setdefault(2, sys.stderr.fileno())
2320
2321                 # flush any pending output
2322                 for fd in fd_pipes.itervalues():
2323                         if fd == sys.stdout.fileno():
2324                                 sys.stdout.flush()
2325                         if fd == sys.stderr.fileno():
2326                                 sys.stderr.flush()
2327
2328                 logfile = self.logfile
2329                 self._files = self._files_dict()
2330                 files = self._files
2331
2332                 master_fd, slave_fd = self._pipe(fd_pipes)
2333                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2334                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2335
2336                 null_input = None
2337                 fd_pipes_orig = fd_pipes.copy()
2338                 if self.background:
2339                         # TODO: Use job control functions like tcsetpgrp() to control
2340                         # access to stdin. Until then, use /dev/null so that any
2341                         # attempts to read from stdin will immediately return EOF
2342                         # instead of blocking indefinitely.
2343                         null_input = open('/dev/null', 'rb')
2344                         fd_pipes[0] = null_input.fileno()
2345                 else:
2346                         fd_pipes[0] = fd_pipes_orig[0]
2347
2348                 files.process = os.fdopen(master_fd, 'rb')
2349                 if logfile is not None:
2350
2351                         fd_pipes[1] = slave_fd
2352                         fd_pipes[2] = slave_fd
2353
2354                         files.log = open(logfile, mode='ab')
2355                         portage.util.apply_secpass_permissions(logfile,
2356                                 uid=portage.portage_uid, gid=portage.portage_gid,
2357                                 mode=0660)
2358
2359                         if not self.background:
2360                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2361
2362                         output_handler = self._output_handler
2363
2364                 else:
2365
2366                         # Create a dummy pipe so the scheduler can monitor
2367                         # the process from inside a poll() loop.
2368                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2369                         if self.background:
2370                                 fd_pipes[1] = slave_fd
2371                                 fd_pipes[2] = slave_fd
2372                         output_handler = self._dummy_handler
2373
2374                 kwargs = {}
2375                 for k in self._spawn_kwarg_names:
2376                         v = getattr(self, k)
2377                         if v is not None:
2378                                 kwargs[k] = v
2379
2380                 kwargs["fd_pipes"] = fd_pipes
2381                 kwargs["returnpid"] = True
2382                 kwargs.pop("logfile", None)
2383
2384                 self._reg_id = self.scheduler.register(files.process.fileno(),
2385                         self._registered_events, output_handler)
2386                 self._registered = True
2387
2388                 retval = self._spawn(self.args, **kwargs)
2389
2390                 os.close(slave_fd)
2391                 if null_input is not None:
2392                         null_input.close()
2393
2394                 if isinstance(retval, int):
2395                         # spawn failed
2396                         self._unregister()
2397                         self.returncode = retval
2398                         self.wait()
2399                         return
2400
2401                 self.pid = retval[0]
2402                 portage.process.spawned_pids.remove(self.pid)
2403
2404         def _pipe(self, fd_pipes):
2405                 """
2406                 @type fd_pipes: dict
2407                 @param fd_pipes: pipes from which to copy terminal size if desired.
2408                 """
2409                 return os.pipe()
2410
2411         def _spawn(self, args, **kwargs):
2412                 return portage.process.spawn(args, **kwargs)
2413
2414         def _output_handler(self, fd, event):
2415
2416                 if event & PollConstants.POLLIN:
2417
2418                         files = self._files
2419                         buf = array.array('B')
2420                         try:
2421                                 buf.fromfile(files.process, self._bufsize)
2422                         except EOFError:
2423                                 pass
2424
2425                         if buf:
2426                                 if not self.background:
2427                                         buf.tofile(files.stdout)
2428                                         files.stdout.flush()
2429                                 buf.tofile(files.log)
2430                                 files.log.flush()
2431                         else:
2432                                 self._unregister()
2433                                 self.wait()
2434
2435                 self._unregister_if_appropriate(event)
2436                 return self._registered
2437
2438         def _dummy_handler(self, fd, event):
2439                 """
2440                 This method is mainly interested in detecting EOF, since
2441                 the only purpose of the pipe is to allow the scheduler to
2442                 monitor the process from inside a poll() loop.
2443                 """
2444
2445                 if event & PollConstants.POLLIN:
2446
2447                         buf = array.array('B')
2448                         try:
2449                                 buf.fromfile(self._files.process, self._bufsize)
2450                         except EOFError:
2451                                 pass
2452
2453                         if buf:
2454                                 pass
2455                         else:
2456                                 self._unregister()
2457                                 self.wait()
2458
2459                 self._unregister_if_appropriate(event)
2460                 return self._registered
2461
2462 class MiscFunctionsProcess(SpawnProcess):
2463         """
2464         Spawns misc-functions.sh with an existing ebuild environment.
2465         """
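             # The "commands" slot holds the arguments passed on the
             # misc-functions.sh command line. EbuildPhase._ebuild_exit() below
             # supplies the post-phase hooks from portage._post_phase_cmds, and
             # _start() appends them to the misc-functions.sh invocation so they
             # run with the saved ebuild environment for the current phase.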
2466
2467         __slots__ = ("commands", "phase", "pkg", "settings")
2468
2469         def _start(self):
2470                 settings = self.settings
2471                 settings.pop("EBUILD_PHASE", None)
2472                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2473                 misc_sh_binary = os.path.join(portage_bin_path,
2474                         os.path.basename(portage.const.MISC_SH_BINARY))
2475
2476                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2477                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2478
2479                 portage._doebuild_exit_status_unlink(
2480                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2481
2482                 SpawnProcess._start(self)
2483
2484         def _spawn(self, args, **kwargs):
2485                 settings = self.settings
2486                 debug = settings.get("PORTAGE_DEBUG") == "1"
2487                 return portage.spawn(" ".join(args), settings,
2488                         debug=debug, **kwargs)
2489
2490         def _set_returncode(self, wait_retval):
2491                 SpawnProcess._set_returncode(self, wait_retval)
2492                 self.returncode = portage._doebuild_exit_status_check_and_log(
2493                         self.settings, self.phase, self.returncode)
2494
2495 class EbuildFetcher(SpawnProcess):
2496
2497         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2498                 ("_build_dir",)
2499
2500         def _start(self):
2501
2502                 root_config = self.pkg.root_config
2503                 portdb = root_config.trees["porttree"].dbapi
2504                 ebuild_path = portdb.findname(self.pkg.cpv)
2505                 settings = self.config_pool.allocate()
2506                 settings.setcpv(self.pkg)
2507
2508                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2509                 # should not be touched since otherwise it could interfere with
2510                 # another instance of the same cpv concurrently being built for a
2511                 # different $ROOT (currently, builds only cooperate with prefetchers
2512                 # that are spawned for the same $ROOT).
2513                 if not self.prefetch:
2514                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2515                         self._build_dir.lock()
2516                         self._build_dir.clean_log()
2517                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2518                         if self.logfile is None:
2519                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2520
2521                 phase = "fetch"
2522                 if self.fetchall:
2523                         phase = "fetchall"
2524
2525                 # If any incremental variables have been overridden
2526                 # via the environment, those values need to be passed
2527                 # along here so that they are correctly considered by
2528                 # the config instance in the subprocess.
2529                 fetch_env = os.environ.copy()
2530
2531                 nocolor = settings.get("NOCOLOR")
2532                 if nocolor is not None:
2533                         fetch_env["NOCOLOR"] = nocolor
2534
2535                 fetch_env["PORTAGE_NICENESS"] = "0"
2536                 if self.prefetch:
2537                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2538
2539                 ebuild_binary = os.path.join(
2540                         settings["PORTAGE_BIN_PATH"], "ebuild")
2541
2542                 fetch_args = [ebuild_binary, ebuild_path, phase]
2543                 debug = settings.get("PORTAGE_DEBUG") == "1"
2544                 if debug:
2545                         fetch_args.append("--debug")
2546
2547                 self.args = fetch_args
2548                 self.env = fetch_env
2549                 SpawnProcess._start(self)
2550
2551         def _pipe(self, fd_pipes):
2552                 """When appropriate, use a pty so that fetcher progress
2553                 bars, like the ones wget displays, work properly."""
2554                 if self.background or not sys.stdout.isatty():
2555                         # When the output only goes to a log file,
2556                         # there's no point in creating a pty.
2557                         return os.pipe()
2558                 stdout_pipe = fd_pipes.get(1)
2559                 got_pty, master_fd, slave_fd = \
2560                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2561                 return (master_fd, slave_fd)
2562
2563         def _set_returncode(self, wait_retval):
2564                 SpawnProcess._set_returncode(self, wait_retval)
2565                 # Collect elog messages that might have been
2566                 # created by the pkg_nofetch phase.
2567                 if self._build_dir is not None:
2568                         # Skip elog messages for prefetch, in order to avoid duplicates.
2569                         if not self.prefetch and self.returncode != os.EX_OK:
2570                                 elog_out = None
2571                                 if self.logfile is not None:
2572                                         if self.background:
2573                                                 elog_out = open(self.logfile, 'a')
2574                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2575                                 if self.logfile is not None:
2576                                         msg += ", Log file:"
2577                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2578                                 if self.logfile is not None:
2579                                         eerror(" '%s'" % (self.logfile,),
2580                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2581                                 if elog_out is not None:
2582                                         elog_out.close()
2583                         if not self.prefetch:
2584                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2585                         features = self._build_dir.settings.features
2586                         if self.returncode == os.EX_OK:
2587                                 self._build_dir.clean_log()
2588                         self._build_dir.unlock()
2589                         self.config_pool.deallocate(self._build_dir.settings)
2590                         self._build_dir = None
2591
2592 class EbuildBuildDir(SlotObject):
2593
2594         __slots__ = ("dir_path", "pkg", "settings",
2595                 "locked", "_catdir", "_lock_obj")
2596
2597         def __init__(self, **kwargs):
2598                 SlotObject.__init__(self, **kwargs)
2599                 self.locked = False
2600
2601         def lock(self):
2602                 """
2603                 This raises an AlreadyLocked exception if lock() is called
2604                 while a lock is already held. In order to avoid this, call
2605                 unlock() or check whether the "locked" attribute is True
2606                 or False before calling lock().
2607                 """
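                     # A minimal usage sketch of the pattern described above
                     # (variable names are illustrative, not taken from a
                     # particular caller):
                     #
                     #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                     #     if not build_dir.locked:
                     #         build_dir.lock()
                     #     try:
                     #         ...  # work inside ${PORTAGE_BUILDDIR}
                     #     finally:
                     #         build_dir.unlock()
                     #
                     # EbuildFetcher, EbuildBuild and Binpkg in this file follow
                     # the same lock()/unlock() pairing.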
2608                 if self._lock_obj is not None:
2609                         raise self.AlreadyLocked((self._lock_obj,))
2610
2611                 dir_path = self.dir_path
2612                 if dir_path is None:
2613                         root_config = self.pkg.root_config
2614                         portdb = root_config.trees["porttree"].dbapi
2615                         ebuild_path = portdb.findname(self.pkg.cpv)
2616                         settings = self.settings
2617                         settings.setcpv(self.pkg)
2618                         debug = settings.get("PORTAGE_DEBUG") == "1"
2619                         use_cache = 1 # always true
2620                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2621                                 self.settings, debug, use_cache, portdb)
2622                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2623
2624                 catdir = os.path.dirname(dir_path)
2625                 self._catdir = catdir
2626
2627                 portage.util.ensure_dirs(os.path.dirname(catdir),
2628                         gid=portage.portage_gid,
2629                         mode=070, mask=0)
2630                 catdir_lock = None
2631                 try:
2632                         catdir_lock = portage.locks.lockdir(catdir)
2633                         portage.util.ensure_dirs(catdir,
2634                                 gid=portage.portage_gid,
2635                                 mode=070, mask=0)
2636                         self._lock_obj = portage.locks.lockdir(dir_path)
2637                 finally:
2638                         self.locked = self._lock_obj is not None
2639                         if catdir_lock is not None:
2640                                 portage.locks.unlockdir(catdir_lock)
2641
2642         def clean_log(self):
2643                 """Discard existing log."""
2644                 settings = self.settings
2645
2646                 for x in ('.logid', 'temp/build.log'):
2647                         try:
2648                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2649                         except OSError:
2650                                 pass
2651
2652         def unlock(self):
2653                 if self._lock_obj is None:
2654                         return
2655
2656                 portage.locks.unlockdir(self._lock_obj)
2657                 self._lock_obj = None
2658                 self.locked = False
2659
2660                 catdir = self._catdir
2661                 catdir_lock = None
2662                 try:
2663                         catdir_lock = portage.locks.lockdir(catdir)
2664                 finally:
2665                         if catdir_lock:
2666                                 try:
2667                                         os.rmdir(catdir)
2668                                 except OSError, e:
2669                                         if e.errno not in (errno.ENOENT,
2670                                                 errno.ENOTEMPTY, errno.EEXIST):
2671                                                 raise
2672                                         del e
2673                                 portage.locks.unlockdir(catdir_lock)
2674
2675         class AlreadyLocked(portage.exception.PortageException):
2676                 pass
2677
2678 class EbuildBuild(CompositeTask):
2679
2680         __slots__ = ("args_set", "config_pool", "find_blockers",
2681                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2682                 "prefetcher", "settings", "world_atom") + \
2683                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2684
2685         def _start(self):
2686
2687                 logger = self.logger
2688                 opts = self.opts
2689                 pkg = self.pkg
2690                 settings = self.settings
2691                 world_atom = self.world_atom
2692                 root_config = pkg.root_config
2693                 tree = "porttree"
2694                 self._tree = tree
2695                 portdb = root_config.trees[tree].dbapi
2696                 settings.setcpv(pkg)
2697                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2698                 ebuild_path = portdb.findname(self.pkg.cpv)
2699                 self._ebuild_path = ebuild_path
2700
2701                 prefetcher = self.prefetcher
2702                 if prefetcher is None:
2703                         pass
2704                 elif not prefetcher.isAlive():
2705                         prefetcher.cancel()
2706                 elif prefetcher.poll() is None:
2707
2708                         waiting_msg = "Fetching files " + \
2709                                 "in the background. " + \
2710                                 "To view fetch progress, run `tail -f " + \
2711                                 "/var/log/emerge-fetch.log` in another " + \
2712                                 "terminal."
2713                         msg_prefix = colorize("GOOD", " * ")
2714                         from textwrap import wrap
2715                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2716                                 for line in wrap(waiting_msg, 65))
2717                         if not self.background:
2718                                 writemsg(waiting_msg, noiselevel=-1)
2719
2720                         self._current_task = prefetcher
2721                         prefetcher.addExitListener(self._prefetch_exit)
2722                         return
2723
2724                 self._prefetch_exit(prefetcher)
2725
2726         def _prefetch_exit(self, prefetcher):
2727
2728                 opts = self.opts
2729                 pkg = self.pkg
2730                 settings = self.settings
2731
2732                 if opts.fetchonly:
2733                         fetcher = EbuildFetchonly(
2734                                 fetch_all=opts.fetch_all_uri,
2735                                 pkg=pkg, pretend=opts.pretend,
2736                                 settings=settings)
2737                         retval = fetcher.execute()
2738                         self.returncode = retval
2739                         self.wait()
2740                         return
2741
2742                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2743                         fetchall=opts.fetch_all_uri,
2744                         fetchonly=opts.fetchonly,
2745                         background=self.background,
2746                         pkg=pkg, scheduler=self.scheduler)
2747
2748                 self._start_task(fetcher, self._fetch_exit)
2749
2750         def _fetch_exit(self, fetcher):
2751                 opts = self.opts
2752                 pkg = self.pkg
2753
2754                 fetch_failed = False
2755                 if opts.fetchonly:
2756                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2757                 else:
2758                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2759
2760                 if fetch_failed and fetcher.logfile is not None and \
2761                         os.path.exists(fetcher.logfile):
2762                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2763
2764                 if not fetch_failed and fetcher.logfile is not None:
2765                         # Fetch was successful, so remove the fetch log.
2766                         try:
2767                                 os.unlink(fetcher.logfile)
2768                         except OSError:
2769                                 pass
2770
2771                 if fetch_failed or opts.fetchonly:
2772                         self.wait()
2773                         return
2774
2775                 logger = self.logger
2776                 opts = self.opts
2777                 pkg_count = self.pkg_count
2778                 scheduler = self.scheduler
2779                 settings = self.settings
2780                 features = settings.features
2781                 ebuild_path = self._ebuild_path
2782                 system_set = pkg.root_config.sets["system"]
2783
2784                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2785                 self._build_dir.lock()
2786
2787                 # Cleaning is triggered before the setup
2788                 # phase, in portage.doebuild().
2789                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2790                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2791                 short_msg = "emerge: (%s of %s) %s Clean" % \
2792                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2793                 logger.log(msg, short_msg=short_msg)
2794
2795                 # buildsyspkg: Check if we need to _force_ binary package creation
2796                 self._issyspkg = "buildsyspkg" in features and \
2797                                 system_set.findAtomForPackage(pkg) and \
2798                                 not opts.buildpkg
2799
2800                 if opts.buildpkg or self._issyspkg:
2801
2802                         self._buildpkg = True
2803
2804                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2805                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2806                         short_msg = "emerge: (%s of %s) %s Compile" % \
2807                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2808                         logger.log(msg, short_msg=short_msg)
2809
2810                 else:
2811                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2812                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2813                         short_msg = "emerge: (%s of %s) %s Compile" % \
2814                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2815                         logger.log(msg, short_msg=short_msg)
2816
2817                 build = EbuildExecuter(background=self.background, pkg=pkg,
2818                         scheduler=scheduler, settings=settings)
2819                 self._start_task(build, self._build_exit)
2820
2821         def _unlock_builddir(self):
2822                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2823                 self._build_dir.unlock()
2824
2825         def _build_exit(self, build):
2826                 if self._default_exit(build) != os.EX_OK:
2827                         self._unlock_builddir()
2828                         self.wait()
2829                         return
2830
2831                 opts = self.opts
2832                 buildpkg = self._buildpkg
2833
2834                 if not buildpkg:
2835                         self._final_exit(build)
2836                         self.wait()
2837                         return
2838
2839                 if self._issyspkg:
2840                         msg = ">>> This is a system package, " + \
2841                                 "let's pack a rescue tarball.\n"
2842
2843                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2844                         if log_path is not None:
2845                                 log_file = open(log_path, 'a')
2846                                 try:
2847                                         log_file.write(msg)
2848                                 finally:
2849                                         log_file.close()
2850
2851                         if not self.background:
2852                                 portage.writemsg_stdout(msg, noiselevel=-1)
2853
2854                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2855                         scheduler=self.scheduler, settings=self.settings)
2856
2857                 self._start_task(packager, self._buildpkg_exit)
2858
2859         def _buildpkg_exit(self, packager):
2860                 """
2861                 Release the build dir lock when there is a failure or
2862                 when in buildpkgonly mode. Otherwise, the lock will
2863                 be released when merge() is called.
2864                 """
2865
2866                 if self._default_exit(packager) != os.EX_OK:
2867                         self._unlock_builddir()
2868                         self.wait()
2869                         return
2870
2871                 if self.opts.buildpkgonly:
2872                         # Need to call "clean" phase for buildpkgonly mode
2873                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2874                         phase = "clean"
2875                         clean_phase = EbuildPhase(background=self.background,
2876                                 pkg=self.pkg, phase=phase,
2877                                 scheduler=self.scheduler, settings=self.settings,
2878                                 tree=self._tree)
2879                         self._start_task(clean_phase, self._clean_exit)
2880                         return
2881
2882                 # Continue holding the builddir lock until
2883                 # after the package has been installed.
2884                 self._current_task = None
2885                 self.returncode = packager.returncode
2886                 self.wait()
2887
2888         def _clean_exit(self, clean_phase):
2889                 if self._final_exit(clean_phase) != os.EX_OK or \
2890                         self.opts.buildpkgonly:
2891                         self._unlock_builddir()
2892                 self.wait()
2893
2894         def install(self):
2895                 """
2896                 Install the package and then clean up and release locks.
2897                 Only call this after the build has completed successfully
2898                 and neither fetchonly nor buildpkgonly mode are enabled.
2899                 """
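                     # A hedged sketch of the calling sequence this docstring
                     # implies (names are illustrative):
                     #
                     #     build = EbuildBuild(...)
                     #     ... wait for the build task to complete ...
                     #     if build.returncode == os.EX_OK and \
                     #             not (opts.fetchonly or opts.buildpkgonly):
                     #         retval = build.install()
                     #
                     # install() releases the builddir lock in a finally clause
                     # whether or not the merge succeeds.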
2900
2901                 find_blockers = self.find_blockers
2902                 ldpath_mtimes = self.ldpath_mtimes
2903                 logger = self.logger
2904                 pkg = self.pkg
2905                 pkg_count = self.pkg_count
2906                 settings = self.settings
2907                 world_atom = self.world_atom
2908                 ebuild_path = self._ebuild_path
2909                 tree = self._tree
2910
2911                 merge = EbuildMerge(find_blockers=self.find_blockers,
2912                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2913                         pkg_count=pkg_count, pkg_path=ebuild_path,
2914                         scheduler=self.scheduler,
2915                         settings=settings, tree=tree, world_atom=world_atom)
2916
2917                 msg = " === (%s of %s) Merging (%s::%s)" % \
2918                         (pkg_count.curval, pkg_count.maxval,
2919                         pkg.cpv, ebuild_path)
2920                 short_msg = "emerge: (%s of %s) %s Merge" % \
2921                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2922                 logger.log(msg, short_msg=short_msg)
2923
2924                 try:
2925                         rval = merge.execute()
2926                 finally:
2927                         self._unlock_builddir()
2928
2929                 return rval
2930
2931 class EbuildExecuter(CompositeTask):
2932
2933         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2934
2935         _phases = ("prepare", "configure", "compile", "test", "install")
2936
2937         _live_eclasses = frozenset([
2938                 "bzr",
2939                 "cvs",
2940                 "darcs",
2941                 "git",
2942                 "mercurial",
2943                 "subversion"
2944         ])
2945
2946         def _start(self):
2947                 self._tree = "porttree"
2948                 pkg = self.pkg
2949                 phase = "clean"
2950                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2951                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2952                 self._start_task(clean_phase, self._clean_phase_exit)
2953
2954         def _clean_phase_exit(self, clean_phase):
2955
2956                 if self._default_exit(clean_phase) != os.EX_OK:
2957                         self.wait()
2958                         return
2959
2960                 pkg = self.pkg
2961                 scheduler = self.scheduler
2962                 settings = self.settings
2963                 cleanup = 1
2964
2965                 # This initializes PORTAGE_LOG_FILE.
2966                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2967
2968                 setup_phase = EbuildPhase(background=self.background,
2969                         pkg=pkg, phase="setup", scheduler=scheduler,
2970                         settings=settings, tree=self._tree)
2971
2972                 setup_phase.addExitListener(self._setup_exit)
2973                 self._current_task = setup_phase
2974                 self.scheduler.scheduleSetup(setup_phase)
2975
2976         def _setup_exit(self, setup_phase):
2977
2978                 if self._default_exit(setup_phase) != os.EX_OK:
2979                         self.wait()
2980                         return
2981
2982                 unpack_phase = EbuildPhase(background=self.background,
2983                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2984                         settings=self.settings, tree=self._tree)
2985
2986                 if self._live_eclasses.intersection(self.pkg.inherited):
2987                         # Serialize $DISTDIR access for live ebuilds since
2988                         # otherwise they can interfere with each other.
2989
2990                         unpack_phase.addExitListener(self._unpack_exit)
2991                         self._current_task = unpack_phase
2992                         self.scheduler.scheduleUnpack(unpack_phase)
2993
2994                 else:
2995                         self._start_task(unpack_phase, self._unpack_exit)
2996
2997         def _unpack_exit(self, unpack_phase):
2998
2999                 if self._default_exit(unpack_phase) != os.EX_OK:
3000                         self.wait()
3001                         return
3002
3003                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3004
3005                 pkg = self.pkg
3006                 phases = self._phases
3007                 eapi = pkg.metadata["EAPI"]
3008                 if eapi in ("0", "1"):
3009                         # skip src_prepare and src_configure
3010                         phases = phases[2:]
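                             # e.g. for EAPI "0" or "1" this leaves
                             # ("compile", "test", "install")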
3011
3012                 for phase in phases:
3013                         ebuild_phases.add(EbuildPhase(background=self.background,
3014                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3015                                 settings=self.settings, tree=self._tree))
3016
3017                 self._start_task(ebuild_phases, self._default_final_exit)
3018
3019 class EbuildMetadataPhase(SubProcess):
3020
3021         """
3022         Asynchronous interface for the ebuild "depend" phase which is
3023         used to extract metadata from the ebuild.
3024         """
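             # Mechanism, as implemented below: the "depend" phase is spawned
             # with an extra pipe attached to file descriptor 9 (_metadata_fd).
             # The spawned ebuild side writes one line per metadata variable to
             # that descriptor; _output_handler() accumulates the raw text and
             # _set_returncode() pairs the resulting lines with
             # portage.auxdbkeys before handing them to metadata_callback. A
             # line count that does not match auxdbkeys is treated as a failure
             # regardless of the child's exit status.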
3025
3026         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3027                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3028                 ("_raw_metadata",)
3029
3030         _file_names = ("ebuild",)
3031         _files_dict = slot_dict_class(_file_names, prefix="")
3032         _metadata_fd = 9
3033
3034         def _start(self):
3035                 settings = self.settings
3036                 settings.setcpv(self.cpv)
3037                 ebuild_path = self.ebuild_path
3038
3039                 eapi = None
3040                 if 'parse-eapi-glep-55' in settings.features:
3041                         pf, eapi = portage._split_ebuild_name_glep55(
3042                                 os.path.basename(ebuild_path))
3043                 if eapi is None and \
3044                         'parse-eapi-ebuild-head' in settings.features:
3045                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3046                                 mode='r', encoding='utf_8', errors='replace'))
3047
3048                 if eapi is not None:
3049                         if not portage.eapi_is_supported(eapi):
3050                                 self.metadata_callback(self.cpv, self.ebuild_path,
3051                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3052                                 self.returncode = os.EX_OK
3053                                 self.wait()
3054                                 return
3055
3056                         settings.configdict['pkg']['EAPI'] = eapi
3057
3058                 debug = settings.get("PORTAGE_DEBUG") == "1"
3059                 master_fd = None
3060                 slave_fd = None
3061                 fd_pipes = None
3062                 if self.fd_pipes is not None:
3063                         fd_pipes = self.fd_pipes.copy()
3064                 else:
3065                         fd_pipes = {}
3066
3067                 fd_pipes.setdefault(0, sys.stdin.fileno())
3068                 fd_pipes.setdefault(1, sys.stdout.fileno())
3069                 fd_pipes.setdefault(2, sys.stderr.fileno())
3070
3071                 # flush any pending output
3072                 for fd in fd_pipes.itervalues():
3073                         if fd == sys.stdout.fileno():
3074                                 sys.stdout.flush()
3075                         if fd == sys.stderr.fileno():
3076                                 sys.stderr.flush()
3077
3078                 fd_pipes_orig = fd_pipes.copy()
3079                 self._files = self._files_dict()
3080                 files = self._files
3081
3082                 master_fd, slave_fd = os.pipe()
3083                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3084                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3085
3086                 fd_pipes[self._metadata_fd] = slave_fd
3087
3088                 self._raw_metadata = []
3089                 files.ebuild = os.fdopen(master_fd, 'r')
3090                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3091                         self._registered_events, self._output_handler)
3092                 self._registered = True
3093
3094                 retval = portage.doebuild(ebuild_path, "depend",
3095                         settings["ROOT"], settings, debug,
3096                         mydbapi=self.portdb, tree="porttree",
3097                         fd_pipes=fd_pipes, returnpid=True)
3098
3099                 os.close(slave_fd)
3100
3101                 if isinstance(retval, int):
3102                         # doebuild failed before spawning
3103                         self._unregister()
3104                         self.returncode = retval
3105                         self.wait()
3106                         return
3107
3108                 self.pid = retval[0]
3109                 portage.process.spawned_pids.remove(self.pid)
3110
3111         def _output_handler(self, fd, event):
3112
3113                 if event & PollConstants.POLLIN:
3114                         self._raw_metadata.append(self._files.ebuild.read())
3115                         if not self._raw_metadata[-1]:
3116                                 self._unregister()
3117                                 self.wait()
3118
3119                 self._unregister_if_appropriate(event)
3120                 return self._registered
3121
3122         def _set_returncode(self, wait_retval):
3123                 SubProcess._set_returncode(self, wait_retval)
3124                 if self.returncode == os.EX_OK:
3125                         metadata_lines = "".join(self._raw_metadata).splitlines()
3126                         if len(portage.auxdbkeys) != len(metadata_lines):
3127                                 # Don't trust bash's returncode if the
3128                                 # number of lines is incorrect.
3129                                 self.returncode = 1
3130                         else:
3131                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3132                                 self.metadata = self.metadata_callback(self.cpv,
3133                                         self.ebuild_path, self.repo_path, metadata,
3134                                         self.ebuild_mtime)
3135
3136 class EbuildProcess(SpawnProcess):
3137
3138         __slots__ = ("phase", "pkg", "settings", "tree")
3139
3140         def _start(self):
3141                 # Don't open the log file during the clean phase since the
3142                 # open file can result in an nfs lock on $T/build.log which
3143                 # prevents the clean phase from removing $T.
3144                 if self.phase not in ("clean", "cleanrm"):
3145                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3146                 SpawnProcess._start(self)
3147
3148         def _pipe(self, fd_pipes):
3149                 stdout_pipe = fd_pipes.get(1)
3150                 got_pty, master_fd, slave_fd = \
3151                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3152                 return (master_fd, slave_fd)
3153
3154         def _spawn(self, args, **kwargs):
3155
3156                 root_config = self.pkg.root_config
3157                 tree = self.tree
3158                 mydbapi = root_config.trees[tree].dbapi
3159                 settings = self.settings
3160                 ebuild_path = settings["EBUILD"]
3161                 debug = settings.get("PORTAGE_DEBUG") == "1"
3162
3163                 rval = portage.doebuild(ebuild_path, self.phase,
3164                         root_config.root, settings, debug,
3165                         mydbapi=mydbapi, tree=tree, **kwargs)
3166
3167                 return rval
3168
3169         def _set_returncode(self, wait_retval):
3170                 SpawnProcess._set_returncode(self, wait_retval)
3171
3172                 if self.phase not in ("clean", "cleanrm"):
3173                         self.returncode = portage._doebuild_exit_status_check_and_log(
3174                                 self.settings, self.phase, self.returncode)
3175
3176                 if self.phase == "test" and self.returncode != os.EX_OK and \
3177                         "test-fail-continue" in self.settings.features:
3178                         self.returncode = os.EX_OK
3179
3180                 portage._post_phase_userpriv_perms(self.settings)
3181
3182 class EbuildPhase(CompositeTask):
3183
3184         __slots__ = ("background", "pkg", "phase",
3185                 "scheduler", "settings", "tree")
3186
3187         _post_phase_cmds = portage._post_phase_cmds
3188
3189         def _start(self):
3190
3191                 ebuild_process = EbuildProcess(background=self.background,
3192                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3193                         settings=self.settings, tree=self.tree)
3194
3195                 self._start_task(ebuild_process, self._ebuild_exit)
3196
3197         def _ebuild_exit(self, ebuild_process):
3198
3199                 if self.phase == "install":
3200                         out = None
3201                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3202                         log_file = None
3203                         if self.background and log_path is not None:
3204                                 log_file = open(log_path, 'a')
3205                                 out = log_file
3206                         try:
3207                                 portage._check_build_log(self.settings, out=out)
3208                         finally:
3209                                 if log_file is not None:
3210                                         log_file.close()
3211
3212                 if self._default_exit(ebuild_process) != os.EX_OK:
3213                         self.wait()
3214                         return
3215
3216                 settings = self.settings
3217
3218                 if self.phase == "install":
3219                         portage._post_src_install_chost_fix(settings)
3220                         portage._post_src_install_uid_fix(settings)
3221
3222                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3223                 if post_phase_cmds is not None:
3224                         post_phase = MiscFunctionsProcess(background=self.background,
3225                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3226                                 scheduler=self.scheduler, settings=settings)
3227                         self._start_task(post_phase, self._post_phase_exit)
3228                         return
3229
3230                 self.returncode = ebuild_process.returncode
3231                 self._current_task = None
3232                 self.wait()
3233
3234         def _post_phase_exit(self, post_phase):
3235                 if self._final_exit(post_phase) != os.EX_OK:
3236                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3237                                 noiselevel=-1)
3238                 self._current_task = None
3239                 self.wait()
3240                 return
3241
3242 class EbuildBinpkg(EbuildProcess):
3243         """
3244         This assumes that src_install() has successfully completed.
3245         """
3246         __slots__ = ("_binpkg_tmpfile",)
3247
3248         def _start(self):
3249                 self.phase = "package"
3250                 self.tree = "porttree"
3251                 pkg = self.pkg
3252                 root_config = pkg.root_config
3253                 portdb = root_config.trees["porttree"].dbapi
3254                 bintree = root_config.trees["bintree"]
3255                 ebuild_path = portdb.findname(self.pkg.cpv)
3256                 settings = self.settings
3257                 debug = settings.get("PORTAGE_DEBUG") == "1"
3258
3259                 bintree.prevent_collision(pkg.cpv)
3260                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3261                         pkg.cpv + ".tbz2." + str(os.getpid()))
3262                 self._binpkg_tmpfile = binpkg_tmpfile
3263                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3264                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3265
3266                 try:
3267                         EbuildProcess._start(self)
3268                 finally:
3269                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3270
3271         def _set_returncode(self, wait_retval):
3272                 EbuildProcess._set_returncode(self, wait_retval)
3273
3274                 pkg = self.pkg
3275                 bintree = pkg.root_config.trees["bintree"]
3276                 binpkg_tmpfile = self._binpkg_tmpfile
3277                 if self.returncode == os.EX_OK:
3278                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3279
3280 class EbuildMerge(SlotObject):
3281
3282         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3283                 "pkg", "pkg_count", "pkg_path", "pretend",
3284                 "scheduler", "settings", "tree", "world_atom")
3285
3286         def execute(self):
3287                 root_config = self.pkg.root_config
3288                 settings = self.settings
3289                 retval = portage.merge(settings["CATEGORY"],
3290                         settings["PF"], settings["D"],
3291                         os.path.join(settings["PORTAGE_BUILDDIR"],
3292                         "build-info"), root_config.root, settings,
3293                         myebuild=settings["EBUILD"],
3294                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3295                         vartree=root_config.trees["vartree"],
3296                         prev_mtimes=self.ldpath_mtimes,
3297                         scheduler=self.scheduler,
3298                         blockers=self.find_blockers)
3299
3300                 if retval == os.EX_OK:
3301                         self.world_atom(self.pkg)
3302                         self._log_success()
3303
3304                 return retval
3305
3306         def _log_success(self):
3307                 pkg = self.pkg
3308                 pkg_count = self.pkg_count
3309                 pkg_path = self.pkg_path
3310                 logger = self.logger
3311                 if "noclean" not in self.settings.features:
3312                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3313                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3314                         logger.log((" === (%s of %s) " + \
3315                                 "Post-Build Cleaning (%s::%s)") % \
3316                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3317                                 short_msg=short_msg)
3318                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3319                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3320
3321 class PackageUninstall(AsynchronousTask):
3322
3323         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3324
3325         def _start(self):
3326                 try:
3327                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3328                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3329                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3330                                 writemsg_level=self._writemsg_level)
3331                 except UninstallFailure, e:
3332                         self.returncode = e.status
3333                 else:
3334                         self.returncode = os.EX_OK
3335                 self.wait()
3336
3337         def _writemsg_level(self, msg, level=0, noiselevel=0):
3338
3339                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3340                 background = self.background
3341
3342                 if log_path is None:
3343                         if not (background and level < logging.WARNING):
3344                                 portage.util.writemsg_level(msg,
3345                                         level=level, noiselevel=noiselevel)
3346                 else:
3347                         if not background:
3348                                 portage.util.writemsg_level(msg,
3349                                         level=level, noiselevel=noiselevel)
3350
3351                         f = open(log_path, 'a')
3352                         try:
3353                                 f.write(msg)
3354                         finally:
3355                                 f.close()
3356
3357 class Binpkg(CompositeTask):
3358
3359         __slots__ = ("find_blockers",
3360                 "ldpath_mtimes", "logger", "opts",
3361                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3362                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3363                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3364
3365         def _writemsg_level(self, msg, level=0, noiselevel=0):
3366
3367                 if not self.background:
3368                         portage.util.writemsg_level(msg,
3369                                 level=level, noiselevel=noiselevel)
3370
3371                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3372                 if log_path is not None:
3373                         f = open(log_path, 'a')
3374                         try:
3375                                 f.write(msg)
3376                         finally:
3377                                 f.close()
3378
3379         def _start(self):
3380
3381                 pkg = self.pkg
3382                 settings = self.settings
3383                 settings.setcpv(pkg)
3384                 self._tree = "bintree"
3385                 self._bintree = self.pkg.root_config.trees[self._tree]
3386                 self._verify = not self.opts.pretend
3387
3388                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3389                         "portage", pkg.category, pkg.pf)
3390                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3391                         pkg=pkg, settings=settings)
3392                 self._image_dir = os.path.join(dir_path, "image")
3393                 self._infloc = os.path.join(dir_path, "build-info")
3394                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3395                 settings["EBUILD"] = self._ebuild_path
3396                 debug = settings.get("PORTAGE_DEBUG") == "1"
3397                 portage.doebuild_environment(self._ebuild_path, "setup",
3398                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3399                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3400
3401                 # The prefetcher has already completed or it
3402                 # could be running now. If it's running now,
3403                 # wait for it to complete since it holds
3404                 # a lock on the file being fetched. The
3405                 # portage.locks functions are only designed
3406                 # to work between separate processes. Since
3407                 # the lock is held by the current process,
3408                 # use the scheduler and fetcher methods to
3409                 # synchronize with the fetcher.
3410                 prefetcher = self.prefetcher
3411                 if prefetcher is None:
3412                         pass
3413                 elif not prefetcher.isAlive():
3414                         prefetcher.cancel()
3415                 elif prefetcher.poll() is None:
3416
3417                         waiting_msg = ("Fetching '%s' " + \
3418                                 "in the background. " + \
3419                                 "To view fetch progress, run `tail -f " + \
3420                                 "/var/log/emerge-fetch.log` in another " + \
3421                                 "terminal.") % prefetcher.pkg_path
3422                         msg_prefix = colorize("GOOD", " * ")
3423                         from textwrap import wrap
3424                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3425                                 for line in wrap(waiting_msg, 65))
3426                         if not self.background:
3427                                 writemsg(waiting_msg, noiselevel=-1)
3428
3429                         self._current_task = prefetcher
3430                         prefetcher.addExitListener(self._prefetch_exit)
3431                         return
3432
3433                 self._prefetch_exit(prefetcher)
3434
3435         def _prefetch_exit(self, prefetcher):
3436
3437                 pkg = self.pkg
3438                 pkg_count = self.pkg_count
3439                 if not (self.opts.pretend or self.opts.fetchonly):
3440                         self._build_dir.lock()
3441                         # If necessary, discard old log so that we don't
3442                         # append to it.
3443                         self._build_dir.clean_log()
3444                         # Initialize PORTAGE_LOG_FILE.
3445                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3446                 fetcher = BinpkgFetcher(background=self.background,
3447                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3448                         pretend=self.opts.pretend, scheduler=self.scheduler)
3449                 pkg_path = fetcher.pkg_path
3450                 self._pkg_path = pkg_path
3451
3452                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3453
3454                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3455                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3456                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3457                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3458                         self.logger.log(msg, short_msg=short_msg)
3459                         self._start_task(fetcher, self._fetcher_exit)
3460                         return
3461
3462                 self._fetcher_exit(fetcher)
3463
3464         def _fetcher_exit(self, fetcher):
3465
3466                 # The fetcher only has a returncode when
3467                 # --getbinpkg is enabled.
3468                 if fetcher.returncode is not None:
3469                         self._fetched_pkg = True
3470                         if self._default_exit(fetcher) != os.EX_OK:
3471                                 self._unlock_builddir()
3472                                 self.wait()
3473                                 return
3474
3475                 if self.opts.pretend:
3476                         self._current_task = None
3477                         self.returncode = os.EX_OK
3478                         self.wait()
3479                         return
3480
3481                 verifier = None
3482                 if self._verify:
3483                         logfile = None
3484                         if self.background:
3485                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3486                         verifier = BinpkgVerifier(background=self.background,
3487                                 logfile=logfile, pkg=self.pkg)
3488                         self._start_task(verifier, self._verifier_exit)
3489                         return
3490
3491                 self._verifier_exit(verifier)
3492
3493         def _verifier_exit(self, verifier):
3494                 if verifier is not None and \
3495                         self._default_exit(verifier) != os.EX_OK:
3496                         self._unlock_builddir()
3497                         self.wait()
3498                         return
3499
3500                 logger = self.logger
3501                 pkg = self.pkg
3502                 pkg_count = self.pkg_count
3503                 pkg_path = self._pkg_path
3504
3505                 if self._fetched_pkg:
3506                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3507
3508                 if self.opts.fetchonly:
3509                         self._current_task = None
3510                         self.returncode = os.EX_OK
3511                         self.wait()
3512                         return
3513
3514                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3515                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3516                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3517                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3518                 logger.log(msg, short_msg=short_msg)
3519
3520                 phase = "clean"
3521                 settings = self.settings
3522                 ebuild_phase = EbuildPhase(background=self.background,
3523                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3524                         settings=settings, tree=self._tree)
3525
3526                 self._start_task(ebuild_phase, self._clean_exit)
3527
3528         def _clean_exit(self, clean_phase):
3529                 if self._default_exit(clean_phase) != os.EX_OK:
3530                         self._unlock_builddir()
3531                         self.wait()
3532                         return
3533
3534                 dir_path = self._build_dir.dir_path
3535
3536                 infloc = self._infloc
3537                 pkg = self.pkg
3538                 pkg_path = self._pkg_path
3539
3540                 dir_mode = 0755
3541                 for mydir in (dir_path, self._image_dir, infloc):
3542                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3543                                 gid=portage.data.portage_gid, mode=dir_mode)
3544
3545                 # This initializes PORTAGE_LOG_FILE.
3546                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3547                 self._writemsg_level(">>> Extracting info\n")
3548
3549                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3550                 check_missing_metadata = ("CATEGORY", "PF")
3551                 missing_metadata = set()
3552                 for k in check_missing_metadata:
3553                         v = pkg_xpak.getfile(k)
3554                         if not v:
3555                                 missing_metadata.add(k)
3556
3557                 pkg_xpak.unpackinfo(infloc)
3558                 for k in missing_metadata:
3559                         if k == "CATEGORY":
3560                                 v = pkg.category
3561                         elif k == "PF":
3562                                 v = pkg.pf
3563                         else:
3564                                 continue
3565
3566                         f = open(os.path.join(infloc, k), 'wb')
3567                         try:
3568                                 f.write(v + "\n")
3569                         finally:
3570                                 f.close()
3571
3572                 # Store the md5sum in the vdb.
3573                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3574                 try:
3575                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3576                 finally:
3577                         f.close()
3578
3579                 # This gives bashrc users an opportunity to do various things
3580                 # such as remove binary packages after they're installed.
3581                 settings = self.settings
3582                 settings.setcpv(self.pkg)
3583                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3584                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3585
3586                 phase = "setup"
3587                 setup_phase = EbuildPhase(background=self.background,
3588                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3589                         settings=settings, tree=self._tree)
3590
3591                 setup_phase.addExitListener(self._setup_exit)
3592                 self._current_task = setup_phase
3593                 self.scheduler.scheduleSetup(setup_phase)
3594
3595         def _setup_exit(self, setup_phase):
3596                 if self._default_exit(setup_phase) != os.EX_OK:
3597                         self._unlock_builddir()
3598                         self.wait()
3599                         return
3600
3601                 extractor = BinpkgExtractorAsync(background=self.background,
3602                         image_dir=self._image_dir,
3603                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3604                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3605                 self._start_task(extractor, self._extractor_exit)
3606
3607         def _extractor_exit(self, extractor):
3608                 if self._final_exit(extractor) != os.EX_OK:
3609                         self._unlock_builddir()
3610                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3611                                 noiselevel=-1)
3612                 self.wait()
3613
3614         def _unlock_builddir(self):
3615                 if self.opts.pretend or self.opts.fetchonly:
3616                         return
3617                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3618                 self._build_dir.unlock()
3619
3620         def install(self):
3621
3622                 # This gives bashrc users an opportunity to do various things
3623                 # such as remove binary packages after they're installed.
3624                 settings = self.settings
3625                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3626                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3627
3628                 merge = EbuildMerge(find_blockers=self.find_blockers,
3629                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3630                         pkg=self.pkg, pkg_count=self.pkg_count,
3631                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3632                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3633
3634                 try:
3635                         retval = merge.execute()
3636                 finally:
3637                         settings.pop("PORTAGE_BINPKG_FILE", None)
3638                         self._unlock_builddir()
3639                 return retval
3640
3641 class BinpkgFetcher(SpawnProcess):
3642
3643         __slots__ = ("pkg", "pretend",
3644                 "locked", "pkg_path", "_lock_obj")
3645
3646         def __init__(self, **kwargs):
3647                 SpawnProcess.__init__(self, **kwargs)
3648                 pkg = self.pkg
3649                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3650
3651         def _start(self):
3652
3653                 if self.cancelled:
3654                         return
3655
3656                 pkg = self.pkg
3657                 pretend = self.pretend
3658                 bintree = pkg.root_config.trees["bintree"]
3659                 settings = bintree.settings
3660                 use_locks = "distlocks" in settings.features
3661                 pkg_path = self.pkg_path
3662
3663                 if not pretend:
3664                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3665                         if use_locks:
3666                                 self.lock()
3667                 exists = os.path.exists(pkg_path)
3668                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3669                 if not (pretend or resume):
3670                         # Remove existing file or broken symlink.
3671                         try:
3672                                 os.unlink(pkg_path)
3673                         except OSError:
3674                                 pass
3675
3676                 # urljoin doesn't work correctly with
3677                 # unrecognized protocols like sftp
3678                 if bintree._remote_has_index:
3679                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3680                         if not rel_uri:
3681                                 rel_uri = pkg.cpv + ".tbz2"
3682                         uri = bintree._remote_base_uri.rstrip("/") + \
3683                                 "/" + rel_uri.lstrip("/")
3684                 else:
3685                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3686                                 "/" + pkg.pf + ".tbz2"
3687
3688                 if pretend:
3689                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3690                         self.returncode = os.EX_OK
3691                         self.wait()
3692                         return
3693
3694                 protocol = urlparse.urlparse(uri)[0]
3695                 fcmd_prefix = "FETCHCOMMAND"
3696                 if resume:
3697                         fcmd_prefix = "RESUMECOMMAND"
3698                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3699                 if not fcmd:
3700                         fcmd = settings.get(fcmd_prefix)
3701
3702                 fcmd_vars = {
3703                         "DISTDIR" : os.path.dirname(pkg_path),
3704                         "URI"     : uri,
3705                         "FILE"    : os.path.basename(pkg_path)
3706                 }
3707
3708                 fetch_env = dict(settings.iteritems())
3709                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3710                         for x in shlex.split(fcmd)]
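                     # Illustrative example (hypothetical values): with FETCHCOMMAND_HTTP
                     # set to something like
                     #   wget -O "${DISTDIR}/${FILE}" "${URI}"
                     # varexpand() would yield fetch_args such as
                     #   ['wget', '-O', '/path/to/pkgdir/All/foo-1.0.tbz2',
                     #    'http://binhost.example.org/All/foo-1.0.tbz2']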
3711
3712                 if self.fd_pipes is None:
3713                         self.fd_pipes = {}
3714                 fd_pipes = self.fd_pipes
3715
3716                 # Redirect all output to stdout since some fetchers like
3717                 # wget pollute stderr (if portage detects a problem then it
3718                 # can send its own message to stderr).
3719                 fd_pipes.setdefault(0, sys.stdin.fileno())
3720                 fd_pipes.setdefault(1, sys.stdout.fileno())
3721                 fd_pipes.setdefault(2, sys.stdout.fileno())
3722
3723                 self.args = fetch_args
3724                 self.env = fetch_env
3725                 SpawnProcess._start(self)
3726
3727         def _set_returncode(self, wait_retval):
3728                 SpawnProcess._set_returncode(self, wait_retval)
3729                 if self.returncode == os.EX_OK:
3730                         # If possible, update the mtime to match the remote package if
3731                         # the fetcher didn't already do it automatically.
3732                         bintree = self.pkg.root_config.trees["bintree"]
3733                         if bintree._remote_has_index:
3734                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3735                                 if remote_mtime is not None:
3736                                         try:
3737                                                 remote_mtime = long(remote_mtime)
3738                                         except ValueError:
3739                                                 pass
3740                                         else:
3741                                                 try:
3742                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3743                                                 except OSError:
3744                                                         pass
3745                                                 else:
3746                                                         if remote_mtime != local_mtime:
3747                                                                 try:
3748                                                                         os.utime(self.pkg_path,
3749                                                                                 (remote_mtime, remote_mtime))
3750                                                                 except OSError:
3751                                                                         pass
3752
3753                 if self.locked:
3754                         self.unlock()
3755
3756         def lock(self):
3757                 """
3758                 This raises an AlreadyLocked exception if lock() is called
3759                 while a lock is already held. In order to avoid this, call
3760                 unlock() or check whether the "locked" attribute is True
3761                 or False before calling lock().
3762                 """
3763                 if self._lock_obj is not None:
3764                         raise self.AlreadyLocked((self._lock_obj,))
3765
3766                 self._lock_obj = portage.locks.lockfile(
3767                         self.pkg_path, wantnewlockfile=1)
3768                 self.locked = True
3769
3770         class AlreadyLocked(portage.exception.PortageException):
3771                 pass
3772
3773         def unlock(self):
3774                 if self._lock_obj is None:
3775                         return
3776                 portage.locks.unlockfile(self._lock_obj)
3777                 self._lock_obj = None
3778                 self.locked = False
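
             # Usage sketch (hypothetical caller): check "locked" or call unlock()
             # first to avoid AlreadyLocked, e.g.
             #   if not fetcher.locked:
             #       fetcher.lock()
             #   try:
             #       ...  # fetch the package
             #   finally:
             #       fetcher.unlock()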
3779
3780 class BinpkgVerifier(AsynchronousTask):
3781         __slots__ = ("logfile", "pkg",)
3782
3783         def _start(self):
3784                 """
3785                 Note: Unlike a normal AsynchronousTask.start() method,
3786                 this one does all of its work synchronously. The returncode
3787                 attribute will be set before it returns.
3788                 """
3789
3790                 pkg = self.pkg
3791                 root_config = pkg.root_config
3792                 bintree = root_config.trees["bintree"]
3793                 rval = os.EX_OK
3794                 stdout_orig = sys.stdout
3795                 stderr_orig = sys.stderr
3796                 log_file = None
3797                 if self.background and self.logfile is not None:
3798                         log_file = open(self.logfile, 'a')
3799                 try:
3800                         if log_file is not None:
3801                                 sys.stdout = log_file
3802                                 sys.stderr = log_file
3803                         try:
3804                                 bintree.digestCheck(pkg)
3805                         except portage.exception.FileNotFound:
3806                                 writemsg("!!! Fetching Binary failed " + \
3807                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3808                                 rval = 1
3809                         except portage.exception.DigestException, e:
3810                                 writemsg("\n!!! Digest verification failed:\n",
3811                                         noiselevel=-1)
3812                                 writemsg("!!! %s\n" % e.value[0],
3813                                         noiselevel=-1)
3814                                 writemsg("!!! Reason: %s\n" % e.value[1],
3815                                         noiselevel=-1)
3816                                 writemsg("!!! Got: %s\n" % e.value[2],
3817                                         noiselevel=-1)
3818                                 writemsg("!!! Expected: %s\n" % e.value[3],
3819                                         noiselevel=-1)
3820                                 rval = 1
3821                         if rval != os.EX_OK:
3822                                 pkg_path = bintree.getname(pkg.cpv)
3823                                 head, tail = os.path.split(pkg_path)
3824                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3825                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3826                                         noiselevel=-1)
3827                 finally:
3828                         sys.stdout = stdout_orig
3829                         sys.stderr = stderr_orig
3830                         if log_file is not None:
3831                                 log_file.close()
3832
3833                 self.returncode = rval
3834                 self.wait()
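
             # Because _start() does its work synchronously, a caller can (hypothetical
             # usage) read the result immediately after start():
             #   verifier = BinpkgVerifier(background=False, pkg=pkg)
             #   verifier.start()
             #   if verifier.returncode != os.EX_OK:
             #       ...  # digest check failed; the file has been renamed aside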
3835
3836 class BinpkgPrefetcher(CompositeTask):
3837
3838         __slots__ = ("pkg",) + \
3839                 ("pkg_path", "_bintree",)
3840
3841         def _start(self):
3842                 self._bintree = self.pkg.root_config.trees["bintree"]
3843                 fetcher = BinpkgFetcher(background=self.background,
3844                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3845                         scheduler=self.scheduler)
3846                 self.pkg_path = fetcher.pkg_path
3847                 self._start_task(fetcher, self._fetcher_exit)
3848
3849         def _fetcher_exit(self, fetcher):
3850
3851                 if self._default_exit(fetcher) != os.EX_OK:
3852                         self.wait()
3853                         return
3854
3855                 verifier = BinpkgVerifier(background=self.background,
3856                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3857                 self._start_task(verifier, self._verifier_exit)
3858
3859         def _verifier_exit(self, verifier):
3860                 if self._default_exit(verifier) != os.EX_OK:
3861                         self.wait()
3862                         return
3863
3864                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3865
3866                 self._current_task = None
3867                 self.returncode = os.EX_OK
3868                 self.wait()
3869
3870 class BinpkgExtractorAsync(SpawnProcess):
3871
3872         __slots__ = ("image_dir", "pkg", "pkg_path")
3873
3874         _shell_binary = portage.const.BASH_BINARY
3875
3876         def _start(self):
3877                 self.args = [self._shell_binary, "-c",
3878                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3879                         (portage._shell_quote(self.pkg_path),
3880                         portage._shell_quote(self.image_dir))]
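                     # For example (illustrative paths), this builds a command equivalent to:
                     #   bash -c "bzip2 -dqc -- <pkg_path> | tar -xp -C <image_dir> -f -"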
3881
3882                 self.env = self.pkg.root_config.settings.environ()
3883                 SpawnProcess._start(self)
3884
3885 class MergeListItem(CompositeTask):
3886
3887         """
3888         TODO: For parallel scheduling, everything here needs asynchronous
3889         execution support (start, poll, and wait methods).
3890         """
3891
3892         __slots__ = ("args_set",
3893                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3894                 "find_blockers", "logger", "mtimedb", "pkg",
3895                 "pkg_count", "pkg_to_replace", "prefetcher",
3896                 "settings", "statusMessage", "world_atom") + \
3897                 ("_install_task",)
3898
3899         def _start(self):
3900
3901                 pkg = self.pkg
3902                 build_opts = self.build_opts
3903
3904                 if pkg.installed:
3905                         # uninstall is executed by self.merge()
3906                         self.returncode = os.EX_OK
3907                         self.wait()
3908                         return
3909
3910                 args_set = self.args_set
3911                 find_blockers = self.find_blockers
3912                 logger = self.logger
3913                 mtimedb = self.mtimedb
3914                 pkg_count = self.pkg_count
3915                 scheduler = self.scheduler
3916                 settings = self.settings
3917                 world_atom = self.world_atom
3918                 ldpath_mtimes = mtimedb["ldpath"]
3919
3920                 action_desc = "Emerging"
3921                 preposition = "for"
3922                 if pkg.type_name == "binary":
3923                         action_desc += " binary"
3924
3925                 if build_opts.fetchonly:
3926                         action_desc = "Fetching"
3927
3928                 msg = "%s (%s of %s) %s" % \
3929                         (action_desc,
3930                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3931                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3932                         colorize("GOOD", pkg.cpv))
3933
3934                 portdb = pkg.root_config.trees["porttree"].dbapi
3935                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3936                 if portdir_repo_name:
3937                         pkg_repo_name = pkg.metadata.get("repository")
3938                         if pkg_repo_name != portdir_repo_name:
3939                                 if not pkg_repo_name:
3940                                         pkg_repo_name = "unknown repo"
3941                                 msg += " from %s" % pkg_repo_name
3942
3943                 if pkg.root != "/":
3944                         msg += " %s %s" % (preposition, pkg.root)
3945
3946                 if not build_opts.pretend:
3947                         self.statusMessage(msg)
3948                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3949                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3950
3951                 if pkg.type_name == "ebuild":
3952
3953                         build = EbuildBuild(args_set=args_set,
3954                                 background=self.background,
3955                                 config_pool=self.config_pool,
3956                                 find_blockers=find_blockers,
3957                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3958                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3959                                 prefetcher=self.prefetcher, scheduler=scheduler,
3960                                 settings=settings, world_atom=world_atom)
3961
3962                         self._install_task = build
3963                         self._start_task(build, self._default_final_exit)
3964                         return
3965
3966                 elif pkg.type_name == "binary":
3967
3968                         binpkg = Binpkg(background=self.background,
3969                                 find_blockers=find_blockers,
3970                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3971                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3972                                 prefetcher=self.prefetcher, settings=settings,
3973                                 scheduler=scheduler, world_atom=world_atom)
3974
3975                         self._install_task = binpkg
3976                         self._start_task(binpkg, self._default_final_exit)
3977                         return
3978
3979         def _poll(self):
3980                 self._install_task.poll()
3981                 return self.returncode
3982
3983         def _wait(self):
3984                 self._install_task.wait()
3985                 return self.returncode
3986
3987         def merge(self):
3988
3989                 pkg = self.pkg
3990                 build_opts = self.build_opts
3991                 find_blockers = self.find_blockers
3992                 logger = self.logger
3993                 mtimedb = self.mtimedb
3994                 pkg_count = self.pkg_count
3995                 prefetcher = self.prefetcher
3996                 scheduler = self.scheduler
3997                 settings = self.settings
3998                 world_atom = self.world_atom
3999                 ldpath_mtimes = mtimedb["ldpath"]
4000
4001                 if pkg.installed:
4002                         if not (build_opts.buildpkgonly or \
4003                                 build_opts.fetchonly or build_opts.pretend):
4004
4005                                 uninstall = PackageUninstall(background=self.background,
4006                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4007                                         pkg=pkg, scheduler=scheduler, settings=settings)
4008
4009                                 uninstall.start()
4010                                 retval = uninstall.wait()
4011                                 if retval != os.EX_OK:
4012                                         return retval
4013                         return os.EX_OK
4014
4015                 if build_opts.fetchonly or \
4016                         build_opts.buildpkgonly:
4017                         return self.returncode
4018
4019                 retval = self._install_task.install()
4020                 return retval
4021
4022 class PackageMerge(AsynchronousTask):
4023         """
4024         TODO: Implement asynchronous merge so that the scheduler can
4025         run while a merge is executing.
4026         """
4027
4028         __slots__ = ("merge",)
4029
4030         def _start(self):
4031
4032                 pkg = self.merge.pkg
4033                 pkg_count = self.merge.pkg_count
4034
4035                 if pkg.installed:
4036                         action_desc = "Uninstalling"
4037                         preposition = "from"
4038                         counter_str = ""
4039                 else:
4040                         action_desc = "Installing"
4041                         preposition = "to"
4042                         counter_str = "(%s of %s) " % \
4043                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4044                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4045
4046                 msg = "%s %s%s" % \
4047                         (action_desc,
4048                         counter_str,
4049                         colorize("GOOD", pkg.cpv))
4050
4051                 if pkg.root != "/":
4052                         msg += " %s %s" % (preposition, pkg.root)
4053
4054                 if not self.merge.build_opts.fetchonly and \
4055                         not self.merge.build_opts.pretend and \
4056                         not self.merge.build_opts.buildpkgonly:
4057                         self.merge.statusMessage(msg)
4058
4059                 self.returncode = self.merge.merge()
4060                 self.wait()
4061
4062 class DependencyArg(object):
4063         def __init__(self, arg=None, root_config=None):
4064                 self.arg = arg
4065                 self.root_config = root_config
4066
4067         def __str__(self):
4068                 return str(self.arg)
4069
4070 class AtomArg(DependencyArg):
4071         def __init__(self, atom=None, **kwargs):
4072                 DependencyArg.__init__(self, **kwargs)
4073                 self.atom = atom
4074                 if not isinstance(self.atom, portage.dep.Atom):
4075                         self.atom = portage.dep.Atom(self.atom)
4076                 self.set = (self.atom, )
4077
4078 class PackageArg(DependencyArg):
4079         def __init__(self, package=None, **kwargs):
4080                 DependencyArg.__init__(self, **kwargs)
4081                 self.package = package
4082                 self.atom = portage.dep.Atom("=" + package.cpv)
4083                 self.set = (self.atom, )
4084
4085 class SetArg(DependencyArg):
4086         def __init__(self, set=None, **kwargs):
4087                 DependencyArg.__init__(self, **kwargs)
4088                 self.set = set
4089                 self.name = self.arg[len(SETPREFIX):]
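
     # Illustrative examples (hypothetical values):
     #   AtomArg(arg=">=dev-lang/python-2.4", atom=">=dev-lang/python-2.4",
     #       root_config=root_config).set == (Atom(">=dev-lang/python-2.4"),)
     #   SetArg(arg=SETPREFIX + "world", set=world_set,
     #       root_config=root_config).name == "world"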
4090
4091 class Dependency(SlotObject):
4092         __slots__ = ("atom", "blocker", "depth",
4093                 "parent", "onlydeps", "priority", "root")
4094         def __init__(self, **kwargs):
4095                 SlotObject.__init__(self, **kwargs)
4096                 if self.priority is None:
4097                         self.priority = DepPriority()
4098                 if self.depth is None:
4099                         self.depth = 0
4100
4101 class BlockerCache(portage.cache.mappings.MutableMapping):
4102         """This caches blockers of installed packages so that dep_check does not
4103         have to be done for every single installed package on every invocation of
4104         emerge.  The cache is invalidated whenever it is detected that something
4105         has changed that might alter the results of dep_check() calls:
4106                 1) the set of installed packages (including COUNTER) has changed
4107                 2) the old-style virtuals have changed
4108         """
4109
4110         # Number of uncached packages to trigger cache update, since
4111         # it's wasteful to update it for every vdb change.
4112         _cache_threshold = 5
4113
4114         class BlockerData(object):
4115
4116                 __slots__ = ("__weakref__", "atoms", "counter")
4117
4118                 def __init__(self, counter, atoms):
4119                         self.counter = counter
4120                         self.atoms = atoms
4121
4122         def __init__(self, myroot, vardb):
4123                 self._vardb = vardb
4124                 self._virtuals = vardb.settings.getvirtuals()
4125                 self._cache_filename = os.path.join(myroot,
4126                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4127                 self._cache_version = "1"
4128                 self._cache_data = None
4129                 self._modified = set()
4130                 self._load()
4131
4132         def _load(self):
4133                 try:
4134                         f = open(self._cache_filename, mode='rb')
4135                         mypickle = pickle.Unpickler(f)
4136                         try:
4137                                 mypickle.find_global = None
4138                         except AttributeError:
4139                                 # TODO: If py3k, override Unpickler.find_class().
4140                                 pass
4141                         self._cache_data = mypickle.load()
4142                         f.close()
4143                         del f
4144                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4145                         if isinstance(e, pickle.UnpicklingError):
4146                                 writemsg("!!! Error loading '%s': %s\n" % \
4147                                         (self._cache_filename, str(e)), noiselevel=-1)
4148                         del e
4149
4150                 cache_valid = self._cache_data and \
4151                         isinstance(self._cache_data, dict) and \
4152                         self._cache_data.get("version") == self._cache_version and \
4153                         isinstance(self._cache_data.get("blockers"), dict)
4154                 if cache_valid:
4155                         # Validate all the atoms and counters so that
4156                         # corruption is detected as soon as possible.
4157                         invalid_items = set()
4158                         for k, v in self._cache_data["blockers"].iteritems():
4159                                 if not isinstance(k, basestring):
4160                                         invalid_items.add(k)
4161                                         continue
4162                                 try:
4163                                         if portage.catpkgsplit(k) is None:
4164                                                 invalid_items.add(k)
4165                                                 continue
4166                                 except portage.exception.InvalidData:
4167                                         invalid_items.add(k)
4168                                         continue
4169                                 if not isinstance(v, tuple) or \
4170                                         len(v) != 2:
4171                                         invalid_items.add(k)
4172                                         continue
4173                                 counter, atoms = v
4174                                 if not isinstance(counter, (int, long)):
4175                                         invalid_items.add(k)
4176                                         continue
4177                                 if not isinstance(atoms, (list, tuple)):
4178                                         invalid_items.add(k)
4179                                         continue
4180                                 invalid_atom = False
4181                                 for atom in atoms:
4182                                         if not isinstance(atom, basestring):
4183                                                 invalid_atom = True
4184                                                 break
4185                                         if atom[:1] != "!" or \
4186                                                 not portage.isvalidatom(
4187                                                 atom, allow_blockers=True):
4188                                                 invalid_atom = True
4189                                                 break
4190                                 if invalid_atom:
4191                                         invalid_items.add(k)
4192                                         continue
4193
4194                         for k in invalid_items:
4195                                 del self._cache_data["blockers"][k]
4196                         if not self._cache_data["blockers"]:
4197                                 cache_valid = False
4198
4199                 if not cache_valid:
4200                         self._cache_data = {"version":self._cache_version}
4201                         self._cache_data["blockers"] = {}
4202                         self._cache_data["virtuals"] = self._virtuals
4203                 self._modified.clear()
4204
4205         def flush(self):
4206                 """If the current user has permission and the internal blocker cache
4207                 has been updated, save it to disk and mark it unmodified.  This is called
4208                 by emerge after it has processed blockers for all installed packages.
4209                 Currently, the cache is only written if the user has superuser
4210                 privileges (since that's required to obtain a lock), but all users
4211                 have read access and benefit from faster blocker lookups (as long as
4212                 the entire cache is still valid).  The cache is stored as a pickled
4213                 dict object with the following format:
4214
4215                 {
4216                         "version" : "1",
4217                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4218                         "virtuals" : vardb.settings.getvirtuals()
4219                 }
4220                 """
4221                 if len(self._modified) >= self._cache_threshold and \
4222                         secpass >= 2:
4223                         try:
4224                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4225                                 pickle.dump(self._cache_data, f, protocol=2)
4226                                 f.close()
4227                                 portage.util.apply_secpass_permissions(
4228                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4229                         except (IOError, OSError), e:
4230                                 pass
4231                         self._modified.clear()
4232
4233         def __setitem__(self, cpv, blocker_data):
4234                 """
4235                 Update the cache and mark it as modified for a future call to
4236                 self.flush().
4237
4238                 @param cpv: Package for which to cache blockers.
4239                 @type cpv: String
4240                 @param blocker_data: An object with counter and atoms attributes.
4241                 @type blocker_data: BlockerData
4242                 """
4243                 self._cache_data["blockers"][cpv] = \
4244                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4245                 self._modified.add(cpv)
4246
4247         def __iter__(self):
4248                 if self._cache_data is None:
4249                         # triggered by python-trace
4250                         return iter([])
4251                 return iter(self._cache_data["blockers"])
4252
4253         def __delitem__(self, cpv):
4254                 del self._cache_data["blockers"][cpv]
4255
4256         def __getitem__(self, cpv):
4257                 """
4258                 @rtype: BlockerData
4259                 @returns: An object with counter and atoms attributes.
4260                 """
4261                 return self.BlockerData(*self._cache_data["blockers"][cpv])
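
             # Usage sketch (hypothetical values): BlockerCache behaves like a mapping
             # from cpv strings to BlockerData, e.g.
             #   cache = BlockerCache(myroot, vardb)
             #   cache["app-misc/foo-1.0"] = cache.BlockerData(counter, blocker_atoms)
             #   atoms = cache["app-misc/foo-1.0"].atoms
             #   cache.flush()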
4262
4263 class BlockerDB(object):
4264
4265         def __init__(self, root_config):
4266                 self._root_config = root_config
4267                 self._vartree = root_config.trees["vartree"]
4268                 self._portdb = root_config.trees["porttree"].dbapi
4269
4270                 self._dep_check_trees = None
4271                 self._fake_vartree = None
4272
4273         def _get_fake_vartree(self, acquire_lock=0):
4274                 fake_vartree = self._fake_vartree
4275                 if fake_vartree is None:
4276                         fake_vartree = FakeVartree(self._root_config,
4277                                 acquire_lock=acquire_lock)
4278                         self._fake_vartree = fake_vartree
4279                         self._dep_check_trees = { self._vartree.root : {
4280                                 "porttree"    :  fake_vartree,
4281                                 "vartree"     :  fake_vartree,
4282                         }}
4283                 else:
4284                         fake_vartree.sync(acquire_lock=acquire_lock)
4285                 return fake_vartree
4286
4287         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4288                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4289                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4290                 settings = self._vartree.settings
4291                 stale_cache = set(blocker_cache)
4292                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4293                 dep_check_trees = self._dep_check_trees
4294                 vardb = fake_vartree.dbapi
4295                 installed_pkgs = list(vardb)
4296
4297                 for inst_pkg in installed_pkgs:
4298                         stale_cache.discard(inst_pkg.cpv)
4299                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4300                         if cached_blockers is not None and \
4301                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4302                                 cached_blockers = None
4303                         if cached_blockers is not None:
4304                                 blocker_atoms = cached_blockers.atoms
4305                         else:
4306                                 # Use aux_get() to trigger FakeVartree global
4307                                 # updates on *DEPEND when appropriate.
4308                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4309                                 try:
4310                                         portage.dep._dep_check_strict = False
4311                                         success, atoms = portage.dep_check(depstr,
4312                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4313                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4314                                 finally:
4315                                         portage.dep._dep_check_strict = True
4316                                 if not success:
4317                                         pkg_location = os.path.join(inst_pkg.root,
4318                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4319                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4320                                                 (pkg_location, atoms), noiselevel=-1)
4321                                         continue
4322
4323                                 blocker_atoms = [atom for atom in atoms \
4324                                         if atom.startswith("!")]
4325                                 blocker_atoms.sort()
4326                                 counter = long(inst_pkg.metadata["COUNTER"])
4327                                 blocker_cache[inst_pkg.cpv] = \
4328                                         blocker_cache.BlockerData(counter, blocker_atoms)
4329                 for cpv in stale_cache:
4330                         del blocker_cache[cpv]
4331                 blocker_cache.flush()
4332
4333                 blocker_parents = digraph()
4334                 blocker_atoms = []
4335                 for pkg in installed_pkgs:
4336                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4337                                 blocker_atom = blocker_atom.lstrip("!")
4338                                 blocker_atoms.append(blocker_atom)
4339                                 blocker_parents.add(blocker_atom, pkg)
4340
4341                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4342                 blocking_pkgs = set()
4343                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4344                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4345
4346                 # Check for blockers in the other direction.
4347                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4348                 try:
4349                         portage.dep._dep_check_strict = False
4350                         success, atoms = portage.dep_check(depstr,
4351                                 vardb, settings, myuse=new_pkg.use.enabled,
4352                                 trees=dep_check_trees, myroot=new_pkg.root)
4353                 finally:
4354                         portage.dep._dep_check_strict = True
4355                 if not success:
4356                         # We should never get this far with invalid deps.
4357                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4358                         assert False
4359
4360                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4361                         if atom[:1] == "!"]
4362                 if blocker_atoms:
4363                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4364                         for inst_pkg in installed_pkgs:
4365                                 try:
4366                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4367                                 except (portage.exception.InvalidDependString, StopIteration):
4368                                         continue
4369                                 blocking_pkgs.add(inst_pkg)
4370
4371                 return blocking_pkgs
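
             # Typical use (hypothetical caller): given a Package that is about to be
             # merged,
             #   blocker_db = BlockerDB(root_config)
             #   conflicts = blocker_db.findInstalledBlockers(new_pkg)
             # returns the installed packages that block, or are blocked by, new_pkg.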
4372
4373 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4374
4375         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4376                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4377         p_type, p_root, p_key, p_status = parent_node
4378         msg = []
4379         if p_status == "nomerge":
4380                 category, pf = portage.catsplit(p_key)
4381                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4382                 msg.append("Portage is unable to process the dependencies of the ")
4383                 msg.append("'%s' package. " % p_key)
4384                 msg.append("In order to correct this problem, the package ")
4385                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4386                 msg.append("As a temporary workaround, the --nodeps option can ")
4387                 msg.append("be used to ignore all dependencies.  For reference, ")
4388                 msg.append("the problematic dependencies can be found in the ")
4389                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4390         else:
4391                 msg.append("This package can not be installed. ")
4392                 msg.append("Please notify the '%s' package maintainer " % p_key)
4393                 msg.append("about this problem.")
4394
4395         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4396         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4397
4398 class PackageVirtualDbapi(portage.dbapi):
4399         """
4400         A dbapi-like interface class that represents the state of the installed
4401         package database as new packages are installed, replacing any packages
4402         that previously existed in the same slot. The main difference between
4403         this class and fakedbapi is that this one uses Package instances
4404         internally (passed in via cpv_inject() and cpv_remove() calls).
4405         """
4406         def __init__(self, settings):
4407                 portage.dbapi.__init__(self)
4408                 self.settings = settings
4409                 self._match_cache = {}
4410                 self._cp_map = {}
4411                 self._cpv_map = {}
4412
4413         def clear(self):
4414                 """
4415                 Remove all packages.
4416                 """
4417                 if self._cpv_map:
4418                         self._clear_cache()
4419                         self._cp_map.clear()
4420                         self._cpv_map.clear()
4421
4422         def copy(self):
4423                 obj = PackageVirtualDbapi(self.settings)
4424                 obj._match_cache = self._match_cache.copy()
4425                 obj._cp_map = self._cp_map.copy()
4426                 for k, v in obj._cp_map.iteritems():
4427                         obj._cp_map[k] = v[:]
4428                 obj._cpv_map = self._cpv_map.copy()
4429                 return obj
4430
4431         def __iter__(self):
4432                 return self._cpv_map.itervalues()
4433
4434         def __contains__(self, item):
4435                 existing = self._cpv_map.get(item.cpv)
4436                 if existing is not None and \
4437                         existing == item:
4438                         return True
4439                 return False
4440
4441         def get(self, item, default=None):
4442                 cpv = getattr(item, "cpv", None)
4443                 if cpv is None:
4444                         if len(item) != 4:
4445                                 return default
4446                         type_name, root, cpv, operation = item
4447
4448                 existing = self._cpv_map.get(cpv)
4449                 if existing is not None and \
4450                         existing == item:
4451                         return existing
4452                 return default
4453
4454         def match_pkgs(self, atom):
4455                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4456
4457         def _clear_cache(self):
4458                 if self._categories is not None:
4459                         self._categories = None
4460                 if self._match_cache:
4461                         self._match_cache = {}
4462
4463         def match(self, origdep, use_cache=1):
4464                 result = self._match_cache.get(origdep)
4465                 if result is not None:
4466                         return result[:]
4467                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4468                 self._match_cache[origdep] = result
4469                 return result[:]
4470
4471         def cpv_exists(self, cpv):
4472                 return cpv in self._cpv_map
4473
4474         def cp_list(self, mycp, use_cache=1):
4475                 cachelist = self._match_cache.get(mycp)
4476                 # cp_list() doesn't expand old-style virtuals
4477                 if cachelist and cachelist[0].startswith(mycp):
4478                         return cachelist[:]
4479                 cpv_list = self._cp_map.get(mycp)
4480                 if cpv_list is None:
4481                         cpv_list = []
4482                 else:
4483                         cpv_list = [pkg.cpv for pkg in cpv_list]
4484                 self._cpv_sort_ascending(cpv_list)
4485                 if not (not cpv_list and mycp.startswith("virtual/")):
4486                         self._match_cache[mycp] = cpv_list
4487                 return cpv_list[:]
4488
4489         def cp_all(self):
4490                 return list(self._cp_map)
4491
4492         def cpv_all(self):
4493                 return list(self._cpv_map)
4494
4495         def cpv_inject(self, pkg):
4496                 cp_list = self._cp_map.get(pkg.cp)
4497                 if cp_list is None:
4498                         cp_list = []
4499                         self._cp_map[pkg.cp] = cp_list
4500                 e_pkg = self._cpv_map.get(pkg.cpv)
4501                 if e_pkg is not None:
4502                         if e_pkg == pkg:
4503                                 return
4504                         self.cpv_remove(e_pkg)
4505                 for e_pkg in cp_list:
4506                         if e_pkg.slot_atom == pkg.slot_atom:
4507                                 if e_pkg == pkg:
4508                                         return
4509                                 self.cpv_remove(e_pkg)
4510                                 break
4511                 cp_list.append(pkg)
4512                 self._cpv_map[pkg.cpv] = pkg
4513                 self._clear_cache()
4514
4515         def cpv_remove(self, pkg):
4516                 old_pkg = self._cpv_map.get(pkg.cpv)
4517                 if old_pkg != pkg:
4518                         raise KeyError(pkg)
4519                 self._cp_map[pkg.cp].remove(pkg)
4520                 del self._cpv_map[pkg.cpv]
4521                 self._clear_cache()
4522
4523         def aux_get(self, cpv, wants):
4524                 metadata = self._cpv_map[cpv].metadata
4525                 return [metadata.get(x, "") for x in wants]
4526
4527         def aux_update(self, cpv, values):
4528                 self._cpv_map[cpv].metadata.update(values)
4529                 self._clear_cache()
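
             # Usage sketch (hypothetical values): the depgraph uses this class to model
             # the future state of the vdb, e.g.
             #   fakedb = PackageVirtualDbapi(vardb.settings)
             #   fakedb.cpv_inject(pkg)      # replaces any package in the same slot
             #   fakedb.match("dev-lang/python")   # -> list of matching cpv strings
             #   fakedb.match_pkgs(atom)     # -> list of matching Package instances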
4530
4531 class depgraph(object):
4532
4533         pkg_tree_map = RootConfig.pkg_tree_map
4534
4535         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4536
4537         def __init__(self, settings, trees, myopts, myparams, spinner):
4538                 self.settings = settings
4539                 self.target_root = settings["ROOT"]
4540                 self.myopts = myopts
4541                 self.myparams = myparams
4542                 self.edebug = 0
4543                 if settings.get("PORTAGE_DEBUG", "") == "1":
4544                         self.edebug = 1
4545                 self.spinner = spinner
4546                 self._running_root = trees["/"]["root_config"]
4547                 self._opts_no_restart = Scheduler._opts_no_restart
4548                 self.pkgsettings = {}
4549                 # Maps slot atom to package for each Package added to the graph.
4550                 self._slot_pkg_map = {}
4551                 # Maps nodes to the reasons they were selected for reinstallation.
4552                 self._reinstall_nodes = {}
4553                 self.mydbapi = {}
4554                 self.trees = {}
4555                 self._trees_orig = trees
4556                 self.roots = {}
4557                 # Contains a filtered view of preferred packages that are selected
4558                 # from available repositories.
4559                 self._filtered_trees = {}
4560                 # Contains installed packages and new packages that have been added
4561                 # to the graph.
4562                 self._graph_trees = {}
4563                 # All Package instances
4564                 self._pkg_cache = {}
4565                 for myroot in trees:
4566                         self.trees[myroot] = {}
4567                         # Create a RootConfig instance that references
4568                         # the FakeVartree instead of the real one.
4569                         self.roots[myroot] = RootConfig(
4570                                 trees[myroot]["vartree"].settings,
4571                                 self.trees[myroot],
4572                                 trees[myroot]["root_config"].setconfig)
4573                         for tree in ("porttree", "bintree"):
4574                                 self.trees[myroot][tree] = trees[myroot][tree]
4575                         self.trees[myroot]["vartree"] = \
4576                                 FakeVartree(trees[myroot]["root_config"],
4577                                         pkg_cache=self._pkg_cache)
4578                         self.pkgsettings[myroot] = portage.config(
4579                                 clone=self.trees[myroot]["vartree"].settings)
4580                         self._slot_pkg_map[myroot] = {}
4581                         vardb = self.trees[myroot]["vartree"].dbapi
4582                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4583                                 "--buildpkgonly" not in self.myopts
4584                         # This fakedbapi instance will model the state that the vdb will
4585                         # have after new packages have been installed.
4586                         fakedb = PackageVirtualDbapi(vardb.settings)
4587                         if preload_installed_pkgs:
4588                                 for pkg in vardb:
4589                                         self.spinner.update()
4590                                         # This triggers metadata updates via FakeVartree.
4591                                         vardb.aux_get(pkg.cpv, [])
4592                                         fakedb.cpv_inject(pkg)
4593
4594                         # Now that the vardb state is cached in our FakeVartree,
4595                         # we won't be needing the real vartree cache for a while.
4596                         # To make some room on the heap, clear the vardbapi
4597                         # caches.
4598                         trees[myroot]["vartree"].dbapi._clear_cache()
4599                         gc.collect()
4600
4601                         self.mydbapi[myroot] = fakedb
4602                         def graph_tree():
4603                                 pass
4604                         graph_tree.dbapi = fakedb
4605                         self._graph_trees[myroot] = {}
4606                         self._filtered_trees[myroot] = {}
4607                         # Substitute the graph tree for the vartree in dep_check() since we
4608                         # want atom selections to be consistent with package selections that
4609                         # have already been made.
4610                         self._graph_trees[myroot]["porttree"]   = graph_tree
4611                         self._graph_trees[myroot]["vartree"]    = graph_tree
4612                         def filtered_tree():
4613                                 pass
4614                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4615                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4616
4617                         # Passing in graph_tree as the vartree here could lead to better
4618                         # atom selections in some cases by causing atoms for packages that
4619                         # have been added to the graph to be preferred over other choices.
4620                         # However, it can trigger atom selections that result in
4621                         # unresolvable direct circular dependencies. For example, this
4622                         # happens with gwydion-dylan which depends on either itself or
4623                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4624                         # gwydion-dylan-bin needs to be selected in order to avoid
4625                         # an unresolvable direct circular dependency.
4626                         #
4627                         # To solve the problem described above, pass in "graph_db" so that
4628                         # packages that have been added to the graph are distinguishable
4629                         # from other available packages and installed packages. Also, pass
4630                         # the parent package into self._select_atoms() calls so that
4631                         # unresolvable direct circular dependencies can be detected and
4632                         # avoided when possible.
4633                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4634                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4635
4636                         dbs = []
4637                         portdb = self.trees[myroot]["porttree"].dbapi
4638                         bindb  = self.trees[myroot]["bintree"].dbapi
4639                         vardb  = self.trees[myroot]["vartree"].dbapi
4640                         #               (db, pkg_type, built, installed, db_keys)
4641                         if "--usepkgonly" not in self.myopts:
4642                                 db_keys = list(portdb._aux_cache_keys)
4643                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4644                         if "--usepkg" in self.myopts:
4645                                 db_keys = list(bindb._aux_cache_keys)
4646                                 dbs.append((bindb,  "binary", True, False, db_keys))
4647                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4648                         dbs.append((vardb, "installed", True, True, db_keys))
4649                         self._filtered_trees[myroot]["dbs"] = dbs
4650                         if "--usepkg" in self.myopts:
4651                                 self.trees[myroot]["bintree"].populate(
4652                                         "--getbinpkg" in self.myopts,
4653                                         "--getbinpkgonly" in self.myopts)
4654                 del trees
4655
4656                 self.digraph=portage.digraph()
4657                 # contains all sets added to the graph
4658                 self._sets = {}
4659                 # contains atoms given as arguments
4660                 self._sets["args"] = InternalPackageSet()
4661                 # contains all atoms from all sets added to the graph, including
4662                 # atoms given as arguments
4663                 self._set_atoms = InternalPackageSet()
4664                 self._atom_arg_map = {}
4665                 # contains all nodes pulled in by self._set_atoms
4666                 self._set_nodes = set()
4667                 # Contains only Blocker -> Uninstall edges
4668                 self._blocker_uninstalls = digraph()
4669                 # Contains only Package -> Blocker edges
4670                 self._blocker_parents = digraph()
4671                 # Contains only irrelevant Package -> Blocker edges
4672                 self._irrelevant_blockers = digraph()
4673                 # Contains only unsolvable Package -> Blocker edges
4674                 self._unsolvable_blockers = digraph()
4675                 # Contains all Blocker -> Blocked Package edges
4676                 self._blocked_pkgs = digraph()
4677                 # Contains world packages that have been protected from
4678                 # uninstallation but may not have been added to the graph
4679                 # if the graph is not complete yet.
4680                 self._blocked_world_pkgs = {}
4681                 self._slot_collision_info = {}
4682                 # Slot collision nodes are not allowed to block other packages since
4683                 # blocker validation is only able to account for one package per slot.
4684                 self._slot_collision_nodes = set()
4685                 self._parent_atoms = {}
4686                 self._slot_conflict_parent_atoms = set()
4687                 self._serialized_tasks_cache = None
4688                 self._scheduler_graph = None
4689                 self._displayed_list = None
4690                 self._pprovided_args = []
4691                 self._missing_args = []
4692                 self._masked_installed = set()
4693                 self._unsatisfied_deps_for_display = []
4694                 self._unsatisfied_blockers_for_display = None
4695                 self._circular_deps_for_display = None
4696                 self._dep_stack = []
4697                 self._unsatisfied_deps = []
4698                 self._initially_unsatisfied_deps = []
4699                 self._ignored_deps = []
4700                 self._required_set_names = set(["system", "world"])
4701                 self._select_atoms = self._select_atoms_highest_available
4702                 self._select_package = self._select_pkg_highest_available
4703                 self._highest_pkg_cache = {}
4704
4705         def _show_slot_collision_notice(self):
4706                 """Show an informational message advising the user to mask one of
4707                 the packages. In some cases it may be possible to resolve this
4708                 automatically, but support for backtracking (removal of nodes that
4709                 have already been selected) will be required in order to handle all
4710                 possible cases.
4711                 """
4712
4713                 if not self._slot_collision_info:
4714                         return
4715
4716                 self._show_merge_list()
4717
4718                 msg = []
4719                 msg.append("\n!!! Multiple package instances within a single " + \
4720                         "package slot have been pulled\n")
4721                 msg.append("!!! into the dependency graph, resulting" + \
4722                         " in a slot conflict:\n\n")
4723                 indent = "  "
4724                 # Max number of parents shown, to avoid flooding the display.
4725                 max_parents = 3
4726                 explanation_columns = 70
4727                 explanations = 0
4728                 for (slot_atom, root), slot_nodes \
4729                         in self._slot_collision_info.iteritems():
4730                         msg.append(str(slot_atom))
4731                         msg.append("\n\n")
4732
4733                         for node in slot_nodes:
4734                                 msg.append(indent)
4735                                 msg.append(str(node))
4736                                 parent_atoms = self._parent_atoms.get(node)
4737                                 if parent_atoms:
4738                                         pruned_list = set()
4739                                         # Prefer conflict atoms over others.
4740                                         for parent_atom in parent_atoms:
4741                                                 if len(pruned_list) >= max_parents:
4742                                                         break
4743                                                 if parent_atom in self._slot_conflict_parent_atoms:
4744                                                         pruned_list.add(parent_atom)
4745
4746                                         # If this package was pulled in by conflict atoms then
4747                                         # show those alone since those are the most interesting.
4748                                         if not pruned_list:
4749                                                 # When generating the pruned list, prefer instances
4750                                                 # of DependencyArg over instances of Package.
4751                                                 for parent_atom in parent_atoms:
4752                                                         if len(pruned_list) >= max_parents:
4753                                                                 break
4754                                                         parent, atom = parent_atom
4755                                                         if isinstance(parent, DependencyArg):
4756                                                                 pruned_list.add(parent_atom)
4757                                                 # Prefer Package instances that themselves have been
4758                                                 # pulled into collision slots.
4759                                                 for parent_atom in parent_atoms:
4760                                                         if len(pruned_list) >= max_parents:
4761                                                                 break
4762                                                         parent, atom = parent_atom
4763                                                         if isinstance(parent, Package) and \
4764                                                                 (parent.slot_atom, parent.root) \
4765                                                                 in self._slot_collision_info:
4766                                                                 pruned_list.add(parent_atom)
4767                                                 for parent_atom in parent_atoms:
4768                                                         if len(pruned_list) >= max_parents:
4769                                                                 break
4770                                                         pruned_list.add(parent_atom)
4771                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4772                                         parent_atoms = pruned_list
4773                                         msg.append(" pulled in by\n")
4774                                         for parent_atom in parent_atoms:
4775                                                 parent, atom = parent_atom
4776                                                 msg.append(2*indent)
4777                                                 if isinstance(parent,
4778                                                         (PackageArg, AtomArg)):
4779                                                         # For PackageArg and AtomArg types, it's
4780                                                         # redundant to display the atom attribute.
4781                                                         msg.append(str(parent))
4782                                                 else:
4783                                                         # Display the specific atom from SetArg or
4784                                                         # Package types.
4785                                                         msg.append("%s required by %s" % (atom, parent))
4786                                                 msg.append("\n")
4787                                         if omitted_parents:
4788                                                 msg.append(2*indent)
4789                                                 msg.append("(and %d more)\n" % omitted_parents)
4790                                 else:
4791                                         msg.append(" (no parents)\n")
4792                                 msg.append("\n")
4793                         explanation = self._slot_conflict_explanation(slot_nodes)
4794                         if explanation:
4795                                 explanations += 1
4796                                 msg.append(indent + "Explanation:\n\n")
4797                                 for line in textwrap.wrap(explanation, explanation_columns):
4798                                         msg.append(2*indent + line + "\n")
4799                                 msg.append("\n")
4800                 msg.append("\n")
4801                 sys.stderr.write("".join(msg))
4802                 sys.stderr.flush()
4803
4804                 explanations_for_all = explanations == len(self._slot_collision_info)
4805
4806                 if explanations_for_all or "--quiet" in self.myopts:
4807                         return
4808
4809                 msg = []
4810                 msg.append("It may be possible to solve this problem ")
4811                 msg.append("by using package.mask to prevent one of ")
4812                 msg.append("those packages from being selected. ")
4813                 msg.append("However, it is also possible that conflicting ")
4814                 msg.append("dependencies exist such that they are impossible to ")
4815                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4816                 msg.append("the dependencies of two different packages, then those ")
4817                 msg.append("packages cannot be installed simultaneously.")
4818
4819                 from formatter import AbstractFormatter, DumbWriter
4820                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4821                 for x in msg:
4822                         f.add_flowing_data(x)
4823                 f.end_paragraph(1)
4824
4825                 msg = []
4826                 msg.append("For more information, see the MASKED PACKAGES ")
4827                 msg.append("section in the emerge man page or refer ")
4828                 msg.append("to the Gentoo Handbook.")
4829                 for x in msg:
4830                         f.add_flowing_data(x)
4831                 f.end_paragraph(1)
4832                 f.writer.flush()
4833
4834         def _slot_conflict_explanation(self, slot_nodes):
4835                 """
4836                 When a slot conflict occurs due to USE deps, there are a few
4837                 different cases to consider:
4838
4839                 1) New USE are correctly set but --newuse wasn't requested so an
4840                    installed package with incorrect USE happened to get pulled
4841                    into the graph before the new one.
4842
4843                 2) New USE are incorrectly set but an installed package has correct
4844                    USE so it got pulled into the graph, and a new instance also got
4845                    pulled in due to --newuse or an upgrade.
4846
4847                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4848                    and multiple package instances got pulled into the same slot to
4849                    satisfy the conflicting deps.
4850
4851                 Currently, explanations and suggested courses of action are generated
4852                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4853                 """
4854
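                     # The logic below partitions the two conflicting packages into one
                     # that is matched by the conflict-inducing parent atoms (matched_node)
                     # and one that is matched by none of them (unmatched_node); a
                     # suggestion is only generated when that split is unambiguous and
                     # every conflict atom involved carries USE deps.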
4855                 if len(slot_nodes) != 2:
4856                         # Suggestions are only implemented for
4857                         # conflicts between two packages.
4858                         return None
4859
4860                 all_conflict_atoms = self._slot_conflict_parent_atoms
4861                 matched_node = None
4862                 matched_atoms = None
4863                 unmatched_node = None
4864                 for node in slot_nodes:
4865                         parent_atoms = self._parent_atoms.get(node)
4866                         if not parent_atoms:
4867                                 # Normally, there are always parent atoms. If there are
4868                                 # none then something unexpected is happening and there's
4869                                 # currently no suggestion for this case.
4870                                 return None
4871                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4872                         for parent_atom in conflict_atoms:
4873                                 parent, atom = parent_atom
4874                                 if not atom.use:
4875                                         # Suggestions are currently only implemented for cases
4876                                         # in which all conflict atoms have USE deps.
4877                                         return None
4878                         if conflict_atoms:
4879                                 if matched_node is not None:
4880                                         # If conflict atoms match multiple nodes
4881                                         # then there's no suggestion.
4882                                         return None
4883                                 matched_node = node
4884                                 matched_atoms = conflict_atoms
4885                         else:
4886                                 if unmatched_node is not None:
4887                                         # Neither node is matched by conflict atoms, and
4888                                         # there is no suggestion for this case.
4889                                         return None
4890                                 unmatched_node = node
4891
4892                 if matched_node is None or unmatched_node is None:
4893                         # This shouldn't happen.
4894                         return None
4895
4896                 if unmatched_node.installed and not matched_node.installed and \
4897                         unmatched_node.cpv == matched_node.cpv:
4898                         # If the conflicting packages are the same version then
4899                         # --newuse should be all that's needed. If they are different
4900                         # versions then there's some other problem.
4901                         return "New USE are correctly set, but --newuse wasn't" + \
4902                                 " requested, so an installed package with incorrect USE " + \
4903                                 "happened to get pulled into the dependency graph. " + \
4904                                 "In order to solve " + \
4905                                 "this, either specify the --newuse option or explicitly " + \
4906                                 "reinstall '%s'." % matched_node.slot_atom
4907
4908                 if matched_node.installed and not unmatched_node.installed:
4909                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4910                         explanation = ("New USE for '%s' are incorrectly set. " + \
4911                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4912                                 (matched_node.slot_atom, atoms[0])
4913                         if len(atoms) > 1:
4914                                 for atom in atoms[1:-1]:
4915                                         explanation += ", '%s'" % (atom,)
4916                                 if len(atoms) > 2:
4917                                         explanation += ","
4918                                 explanation += " and '%s'" % (atoms[-1],)
4919                         explanation += "."
4920                         return explanation
4921
4922                 return None
4923
4924         def _process_slot_conflicts(self):
4925                 """
4926                 Process slot conflict data to identify specific atoms which
4927                 lead to conflict. These atoms only match a subset of the
4928                 packages that have been pulled into a given slot.
4929                 """
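                     # For each conflicting slot, the union of all parent atoms is tested
                     # against every package in that slot: atoms that match a given
                     # package are attached to it via self._parent_atoms, while atoms
                     # that fail to match are recorded in self._slot_conflict_parent_atoms
                     # as the conflict-inducing atoms.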
4930                 for (slot_atom, root), slot_nodes \
4931                         in self._slot_collision_info.iteritems():
4932
4933                         all_parent_atoms = set()
4934                         for pkg in slot_nodes:
4935                                 parent_atoms = self._parent_atoms.get(pkg)
4936                                 if not parent_atoms:
4937                                         continue
4938                                 all_parent_atoms.update(parent_atoms)
4939
4940                         for pkg in slot_nodes:
4941                                 parent_atoms = self._parent_atoms.get(pkg)
4942                                 if parent_atoms is None:
4943                                         parent_atoms = set()
4944                                         self._parent_atoms[pkg] = parent_atoms
4945                                 for parent_atom in all_parent_atoms:
4946                                         if parent_atom in parent_atoms:
4947                                                 continue
4948                                         # Use package set for matching since it will match via
4949                                         # PROVIDE when necessary, while match_from_list does not.
4950                                         parent, atom = parent_atom
4951                                         atom_set = InternalPackageSet(
4952                                                 initial_atoms=(atom,))
4953                                         if atom_set.findAtomForPackage(pkg):
4954                                                 parent_atoms.add(parent_atom)
4955                                         else:
4956                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4957
4958         def _reinstall_for_flags(self, forced_flags,
4959                 orig_use, orig_iuse, cur_use, cur_iuse):
4960                 """Return a set of flags that trigger reinstallation, or None if there
4961                 are no such flags."""
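                     # Hypothetical example for the --newuse case below: with
                     # orig_iuse={'ssl','gtk'}, cur_iuse={'ssl','qt4'}, no forced flags,
                     # and 'ssl' enabled in both orig_use and cur_use, the IUSE symmetric
                     # difference yields {'gtk', 'qt4'}, so those flags are returned and
                     # a reinstall is triggered.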
4962                 if "--newuse" in self.myopts:
4963                         flags = set(orig_iuse.symmetric_difference(
4964                                 cur_iuse).difference(forced_flags))
4965                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4966                                 cur_iuse.intersection(cur_use)))
4967                         if flags:
4968                                 return flags
4969                 elif "changed-use" == self.myopts.get("--reinstall"):
4970                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4971                                 cur_iuse.intersection(cur_use))
4972                         if flags:
4973                                 return flags
4974                 return None
4975
4976         def _create_graph(self, allow_unsatisfied=False):
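                     # Drain the dependency stack: Package entries have their own
                     # dependencies expanded via _add_pkg_deps(), while Dependency
                     # entries are resolved to concrete packages via _add_dep().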
4977                 dep_stack = self._dep_stack
4978                 while dep_stack:
4979                         self.spinner.update()
4980                         dep = dep_stack.pop()
4981                         if isinstance(dep, Package):
4982                                 if not self._add_pkg_deps(dep,
4983                                         allow_unsatisfied=allow_unsatisfied):
4984                                         return 0
4985                                 continue
4986                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4987                                 return 0
4988                 return 1
4989
4990         def _add_dep(self, dep, allow_unsatisfied=False):
4991                 debug = "--debug" in self.myopts
4992                 buildpkgonly = "--buildpkgonly" in self.myopts
4993                 nodeps = "--nodeps" in self.myopts
4994                 empty = "empty" in self.myparams
4995                 deep = "deep" in self.myparams
4996                 update = "--update" in self.myopts and dep.depth <= 1
4997                 if dep.blocker:
4998                         if not buildpkgonly and \
4999                                 not nodeps and \
5000                                 dep.parent not in self._slot_collision_nodes:
5001                                 if dep.parent.onlydeps:
5002                                         # It's safe to ignore blockers if the
5003                                         # parent is an --onlydeps node.
5004                                         return 1
5005                                 # The blocker applies to the root where
5006                                 # the parent is or will be installed.
5007                                 blocker = Blocker(atom=dep.atom,
5008                                         eapi=dep.parent.metadata["EAPI"],
5009                                         root=dep.parent.root)
5010                                 self._blocker_parents.add(blocker, dep.parent)
5011                         return 1
5012                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5013                         onlydeps=dep.onlydeps)
5014                 if not dep_pkg:
5015                         if dep.priority.optional:
5016                                 # This could be an unnecessary build-time dep
5017                                 # pulled in by --with-bdeps=y.
5018                                 return 1
5019                         if allow_unsatisfied:
5020                                 self._unsatisfied_deps.append(dep)
5021                                 return 1
5022                         self._unsatisfied_deps_for_display.append(
5023                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5024                         return 0
5025                 # In some cases, dep_check will return deps that shouldn't
5026                 # be processed any further, so they are identified and
5027                 # discarded here. Try to discard as few as possible since
5028                 # discarded dependencies reduce the amount of information
5029                 # available for optimization of merge order.
5030                 if dep.priority.satisfied and \
5031                         not dep_pkg.installed and \
5032                         not (existing_node or empty or deep or update):
5033                         myarg = None
5034                         if dep.root == self.target_root:
5035                                 try:
5036                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5037                                 except StopIteration:
5038                                         pass
5039                                 except portage.exception.InvalidDependString:
5040                                         if not dep_pkg.installed:
5041                                                 # This shouldn't happen since the package
5042                                                 # should have been masked.
5043                                                 raise
5044                         if not myarg:
5045                                 self._ignored_deps.append(dep)
5046                                 return 1
5047
5048                 if not self._add_pkg(dep_pkg, dep):
5049                         return 0
5050                 return 1
5051
5052         def _add_pkg(self, pkg, dep):
5053                 myparent = None
5054                 priority = None
5055                 depth = 0
5056                 if dep is None:
5057                         dep = Dependency()
5058                 else:
5059                         myparent = dep.parent
5060                         priority = dep.priority
5061                         depth = dep.depth
5062                 if priority is None:
5063                         priority = DepPriority()
5064                 """
5065                 Fills the digraph with nodes comprised of packages to merge.
5066                 mybigkey is the package spec of the package to merge.
5067                 myparent is the package depending on mybigkey (or None)
5068                 addme = Should we add this package to the digraph or are we just looking at its deps?
5069                         Think --onlydeps, we need to ignore packages in that case.
5070                 #stuff to add:
5071                 #SLOT-aware emerge
5072                 #IUSE-aware emerge -> USE DEP aware depgraph
5073                 #"no downgrade" emerge
5074                 """
5075                 # Ensure that the dependencies of the same package
5076                 # are never processed more than once.
5077                 previously_added = pkg in self.digraph
5078
5079                 # select the correct /var database that we'll be checking against
5080                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5081                 pkgsettings = self.pkgsettings[pkg.root]
5082
5083                 arg_atoms = None
5084                 if True:
5085                         try:
5086                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5087                         except portage.exception.InvalidDependString, e:
5088                                 if not pkg.installed:
5089                                         show_invalid_depstring_notice(
5090                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5091                                         return 0
5092                                 del e
5093
5094                 if not pkg.onlydeps:
5095                         if not pkg.installed and \
5096                                 "empty" not in self.myparams and \
5097                                 vardbapi.match(pkg.slot_atom):
5098                                 # Increase the priority of dependencies on packages that
5099                                 # are being rebuilt. This optimizes merge order so that
5100                                 # dependencies are rebuilt/updated as soon as possible,
5101                                 # which is needed especially when emerge is called by
5102                                 # revdep-rebuild since dependencies may be affected by ABI
5103                                 # breakage that has rendered them useless. Don't adjust
5104                                 # priority here when in "empty" mode since all packages
5105                                 # are being merged in that case.
5106                                 priority.rebuild = True
5107
5108                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5109                         slot_collision = False
5110                         if existing_node:
5111                                 existing_node_matches = pkg.cpv == existing_node.cpv
5112                                 if existing_node_matches and \
5113                                         pkg != existing_node and \
5114                                         dep.atom is not None:
5115                                         # Use package set for matching since it will match via
5116                                         # PROVIDE when necessary, while match_from_list does not.
5117                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5118                                         if not atom_set.findAtomForPackage(existing_node):
5119                                                 existing_node_matches = False
5120                                 if existing_node_matches:
5121                                         # The existing node can be reused.
5122                                         if arg_atoms:
5123                                                 for parent_atom in arg_atoms:
5124                                                         parent, atom = parent_atom
5125                                                         self.digraph.add(existing_node, parent,
5126                                                                 priority=priority)
5127                                                         self._add_parent_atom(existing_node, parent_atom)
5128                                         # If a direct circular dependency is not an unsatisfied
5129                                         # buildtime dependency then drop it here since otherwise
5130                                         # it can skew the merge order calculation in an unwanted
5131                                         # way.
5132                                         if existing_node != myparent or \
5133                                                 (priority.buildtime and not priority.satisfied):
5134                                                 self.digraph.addnode(existing_node, myparent,
5135                                                         priority=priority)
5136                                                 if dep.atom is not None and dep.parent is not None:
5137                                                         self._add_parent_atom(existing_node,
5138                                                                 (dep.parent, dep.atom))
5139                                         return 1
5140                                 else:
5141
5142                                         # A slot collision has occurred.  Sometimes this coincides
5143                                         # with unresolvable blockers, so the slot collision will be
5144                                         # shown later if there are no unresolvable blockers.
5145                                         self._add_slot_conflict(pkg)
5146                                         slot_collision = True
5147
5148                         if slot_collision:
5149                                 # Now add this node to the graph so that self.display()
5150                                 # can show use flags and --tree output.  This node is
5151                                 # only being partially added to the graph.  It must not be
5152                                 # allowed to interfere with the other nodes that have been
5153                                 # added.  Do not overwrite data for existing nodes in
5154                                 # self.mydbapi since that data will be used for blocker
5155                                 # validation.
5156                                 # Even though the graph is now invalid, continue to process
5157                                 # dependencies so that things like --fetchonly can still
5158                                 # function despite collisions.
5159                                 pass
5160                         elif not previously_added:
5161                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5162                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5163                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5164
5165                         if not pkg.installed:
5166                                 # Allow this package to satisfy old-style virtuals in case it
5167                                 # doesn't already. Any pre-existing providers will be preferred
5168                                 # over this one.
5169                                 try:
5170                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5171                                         # For consistency, also update the global virtuals.
5172                                         settings = self.roots[pkg.root].settings
5173                                         settings.unlock()
5174                                         settings.setinst(pkg.cpv, pkg.metadata)
5175                                         settings.lock()
5176                                 except portage.exception.InvalidDependString, e:
5177                                         show_invalid_depstring_notice(
5178                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5179                                         del e
5180                                         return 0
5181
5182                 if arg_atoms:
5183                         self._set_nodes.add(pkg)
5184
5185                 # Do this even when addme is False (--onlydeps) so that the
5186                 # parent/child relationship is always known in case
5187                 # self._show_slot_collision_notice() needs to be called later.
5188                 self.digraph.add(pkg, myparent, priority=priority)
5189                 if dep.atom is not None and dep.parent is not None:
5190                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5191
5192                 if arg_atoms:
5193                         for parent_atom in arg_atoms:
5194                                 parent, atom = parent_atom
5195                                 self.digraph.add(pkg, parent, priority=priority)
5196                                 self._add_parent_atom(pkg, parent_atom)
5197
5198                 """ This section determines whether we go deeper into dependencies or not.
5199                     We want to go deeper on a few occasions:
5200                     Installing package A, we need to make sure package A's deps are met.
5201                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec.
5202                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5203                 """
5204                 dep_stack = self._dep_stack
5205                 if "recurse" not in self.myparams:
5206                         return 1
5207                 elif pkg.installed and \
5208                         "deep" not in self.myparams:
5209                         dep_stack = self._ignored_deps
5210
5211                 self.spinner.update()
5212
5213                 if arg_atoms:
5214                         depth = 0
5215                 pkg.depth = depth
5216                 if not previously_added:
5217                         dep_stack.append(pkg)
5218                 return 1
5219
5220         def _add_parent_atom(self, pkg, parent_atom):
5221                 parent_atoms = self._parent_atoms.get(pkg)
5222                 if parent_atoms is None:
5223                         parent_atoms = set()
5224                         self._parent_atoms[pkg] = parent_atoms
5225                 parent_atoms.add(parent_atom)
5226
5227         def _add_slot_conflict(self, pkg):
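                     # Record pkg as a slot-collision node and group it, together with the
                     # package already occupying the same slot, under a (slot_atom, root)
                     # key in self._slot_collision_info.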
5228                 self._slot_collision_nodes.add(pkg)
5229                 slot_key = (pkg.slot_atom, pkg.root)
5230                 slot_nodes = self._slot_collision_info.get(slot_key)
5231                 if slot_nodes is None:
5232                         slot_nodes = set()
5233                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5234                         self._slot_collision_info[slot_key] = slot_nodes
5235                 slot_nodes.add(pkg)
5236
5237         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5238
5239                 mytype = pkg.type_name
5240                 myroot = pkg.root
5241                 mykey = pkg.cpv
5242                 metadata = pkg.metadata
5243                 myuse = pkg.use.enabled
5244                 jbigkey = pkg
5245                 depth = pkg.depth + 1
5246                 removal_action = "remove" in self.myparams
5247
5248                 edepend={}
5249                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5250                 for k in depkeys:
5251                         edepend[k] = metadata[k]
5252
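                     # With --buildpkgonly the package is only built, not merged, so the
                     # runtime dependencies (RDEPEND and PDEPEND) can be dropped unless a
                     # deep or empty graph has been requested.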
5253                 if not pkg.built and \
5254                         "--buildpkgonly" in self.myopts and \
5255                         "deep" not in self.myparams and \
5256                         "empty" not in self.myparams:
5257                         edepend["RDEPEND"] = ""
5258                         edepend["PDEPEND"] = ""
5259                 bdeps_optional = False
5260
5261                 if pkg.built and not removal_action:
5262                         if self.myopts.get("--with-bdeps", "n") == "y":
5263                                 # Pull in build time deps as requested, but mark them as
5264                                 # "optional" since they are not strictly required. This allows
5265                                 # more freedom in the merge order calculation for solving
5266                                 # circular dependencies. Don't convert to PDEPEND since that
5267                                 # could make --with-bdeps=y less effective if it is used to
5268                                 # adjust merge order to prevent built_with_use() calls from
5269                                 # failing.
5270                                 bdeps_optional = True
5271                         else:
5272                                 # Built packages do not have build time dependencies.
5273                                 edepend["DEPEND"] = ""
5274
5275                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5276                         edepend["DEPEND"] = ""
5277
5278                 bdeps_root = "/"
5279                 if self.target_root != "/":
5280                         if "--root-deps" in self.myopts:
5281                                         bdeps_root = myroot
5282                         if "--rdeps-only" in self.myopts:
5283                                         bdeps_root = "/"
5284                                         edepend["DEPEND"] = ""
5285
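                     # Each entry in the tuple below pairs a dependency string with the
                     # root it is resolved against and a matching priority: DEPEND is
                     # resolved against bdeps_root (normally "/"), while RDEPEND and
                     # PDEPEND are resolved against the package's own root.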
5286                 deps = (
5287                         (bdeps_root, edepend["DEPEND"],
5288                                 self._priority(buildtime=(not bdeps_optional),
5289                                 optional=bdeps_optional)),
5290                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5291                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5292                 )
5293
5294                 debug = "--debug" in self.myopts
5295                 strict = mytype != "installed"
5296                 try:
5297                         for dep_root, dep_string, dep_priority in deps:
5298                                 if not dep_string:
5299                                         continue
5300                                 if debug:
5301                                         print
5302                                         print "Parent:   ", jbigkey
5303                                         print "Depstring:", dep_string
5304                                         print "Priority:", dep_priority
5305                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5306                                 try:
5307                                         selected_atoms = self._select_atoms(dep_root,
5308                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5309                                                 priority=dep_priority)
5310                                 except portage.exception.InvalidDependString, e:
5311                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5312                                         return 0
5313                                 if debug:
5314                                         print "Candidates:", selected_atoms
5315
5316                                 for atom in selected_atoms:
5317                                         try:
5318
5319                                                 atom = portage.dep.Atom(atom)
5320
5321                                                 mypriority = dep_priority.copy()
5322                                                 if not atom.blocker and vardb.match(atom):
5323                                                         mypriority.satisfied = True
5324
5325                                                 if not self._add_dep(Dependency(atom=atom,
5326                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5327                                                         priority=mypriority, root=dep_root),
5328                                                         allow_unsatisfied=allow_unsatisfied):
5329                                                         return 0
5330
5331                                         except portage.exception.InvalidAtom, e:
5332                                                 show_invalid_depstring_notice(
5333                                                         pkg, dep_string, str(e))
5334                                                 del e
5335                                                 if not pkg.installed:
5336                                                         return 0
5337
5338                                 if debug:
5339                                         print "Exiting...", jbigkey
5340                 except portage.exception.AmbiguousPackageName, e:
5341                         pkgs = e.args[0]
5342                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5343                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5344                         for cpv in pkgs:
5345                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5346                         portage.writemsg("\n", noiselevel=-1)
5347                         if mytype == "binary":
5348                                 portage.writemsg(
5349                                         "!!! This binary package cannot be installed: '%s'\n" % \
5350                                         mykey, noiselevel=-1)
5351                         elif mytype == "ebuild":
5352                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5353                                 myebuild, mylocation = portdb.findname2(mykey)
5354                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5355                                         "'%s'\n" % myebuild, noiselevel=-1)
5356                         portage.writemsg("!!! Please notify the package maintainer " + \
5357                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5358                         return 0
5359                 return 1
5360
5361         def _priority(self, **kwargs):
5362                 if "remove" in self.myparams:
5363                         priority_constructor = UnmergeDepPriority
5364                 else:
5365                         priority_constructor = DepPriority
5366                 return priority_constructor(**kwargs)
5367
5368         def _dep_expand(self, root_config, atom_without_category):
5369                 """
5370                 @param root_config: a root config instance
5371                 @type root_config: RootConfig
5372                 @param atom_without_category: an atom without a category component
5373                 @type atom_without_category: String
5374                 @rtype: list
5375                 @returns: a list of atoms containing categories (possibly empty)
5376                 """
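                     # For example (hypothetical), expanding "portage" scans every
                     # category known to the configured databases and could return
                     # ["sys-apps/portage"] if only sys-apps contains a matching package.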
5377                 null_cp = portage.dep_getkey(insert_category_into_atom(
5378                         atom_without_category, "null"))
5379                 cat, atom_pn = portage.catsplit(null_cp)
5380
5381                 dbs = self._filtered_trees[root_config.root]["dbs"]
5382                 categories = set()
5383                 for db, pkg_type, built, installed, db_keys in dbs:
5384                         for cat in db.categories:
5385                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5386                                         categories.add(cat)
5387
5388                 deps = []
5389                 for cat in categories:
5390                         deps.append(insert_category_into_atom(
5391                                 atom_without_category, cat))
5392                 return deps
5393
5394         def _have_new_virt(self, root, atom_cp):
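                     # Returns True if any of the configured databases for this root
                     # provides a package under atom_cp, i.e. a real package or new-style
                     # virtual with that category/package name exists.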
5395                 ret = False
5396                 for db, pkg_type, built, installed, db_keys in \
5397                         self._filtered_trees[root]["dbs"]:
5398                         if db.cp_list(atom_cp):
5399                                 ret = True
5400                                 break
5401                 return ret
5402
5403         def _iter_atoms_for_pkg(self, pkg):
5404                 # TODO: add multiple $ROOT support
5405                 if pkg.root != self.target_root:
5406                         return
5407                 atom_arg_map = self._atom_arg_map
5408                 root_config = self.roots[pkg.root]
5409                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5410                         atom_cp = portage.dep_getkey(atom)
5411                         if atom_cp != pkg.cp and \
5412                                 self._have_new_virt(pkg.root, atom_cp):
5413                                 continue
5414                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5415                         visible_pkgs.reverse() # descending order
5416                         higher_slot = None
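                             # If a visible package with a version higher than pkg exists in
                             # a different slot for this cp, assume the argument atom is
                             # better satisfied by that other slot and skip treating pkg as
                             # a match for this argument.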
5417                         for visible_pkg in visible_pkgs:
5418                                 if visible_pkg.cp != atom_cp:
5419                                         continue
5420                                 if pkg >= visible_pkg:
5421                                         # This is descending order, and we're not
5422                                         # interested in any versions <= pkg given.
5423                                         break
5424                                 if pkg.slot_atom != visible_pkg.slot_atom:
5425                                         higher_slot = visible_pkg
5426                                         break
5427                         if higher_slot is not None:
5428                                 continue
5429                         for arg in atom_arg_map[(atom, pkg.root)]:
5430                                 if isinstance(arg, PackageArg) and \
5431                                         arg.package != pkg:
5432                                         continue
5433                                 yield arg, atom
5434
5435         def select_files(self, myfiles):
5436                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5437                 appropriate depgraph and return a favorite list."""
5438                 debug = "--debug" in self.myopts
5439                 root_config = self.roots[self.target_root]
5440                 sets = root_config.sets
5441                 getSetAtoms = root_config.setconfig.getSetAtoms
5442                 myfavorites=[]
5443                 myroot = self.target_root
5444                 dbs = self._filtered_trees[myroot]["dbs"]
5445                 vardb = self.trees[myroot]["vartree"].dbapi
5446                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5447                 portdb = self.trees[myroot]["porttree"].dbapi
5448                 bindb = self.trees[myroot]["bintree"].dbapi
5449                 pkgsettings = self.pkgsettings[myroot]
5450                 args = []
5451                 onlydeps = "--onlydeps" in self.myopts
5452                 lookup_owners = []
5453                 for x in myfiles:
5454                         ext = os.path.splitext(x)[1]
5455                         if ext==".tbz2":
5456                                 if not os.path.exists(x):
5457                                         if os.path.exists(
5458                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5459                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5460                                         elif os.path.exists(
5461                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5462                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5463                                         else:
5464                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5465                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5466                                                 return 0, myfavorites
5467                                 mytbz2=portage.xpak.tbz2(x)
5468                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5469                                 if os.path.realpath(x) != \
5470                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5471                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5472                                         return 0, myfavorites
5473                                 db_keys = list(bindb._aux_cache_keys)
5474                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5475                                 pkg = Package(type_name="binary", root_config=root_config,
5476                                         cpv=mykey, built=True, metadata=metadata,
5477                                         onlydeps=onlydeps)
5478                                 self._pkg_cache[pkg] = pkg
5479                                 args.append(PackageArg(arg=x, package=pkg,
5480                                         root_config=root_config))
5481                         elif ext==".ebuild":
5482                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5483                                 pkgdir = os.path.dirname(ebuild_path)
5484                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5485                                 cp = pkgdir[len(tree_root)+1:]
5486                                 e = portage.exception.PackageNotFound(
5487                                         ("%s is not in a valid portage tree " + \
5488                                         "hierarchy or does not exist") % x)
5489                                 if not portage.isvalidatom(cp):
5490                                         raise e
5491                                 cat = portage.catsplit(cp)[0]
5492                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5493                                 if not portage.isvalidatom("="+mykey):
5494                                         raise e
5495                                 ebuild_path = portdb.findname(mykey)
5496                                 if ebuild_path:
5497                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5498                                                 cp, os.path.basename(ebuild_path)):
5499                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5500                                                 return 0, myfavorites
5501                                         if mykey not in portdb.xmatch(
5502                                                 "match-visible", portage.dep_getkey(mykey)):
5503                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5504                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5505                                                 print colorize("BAD", "*** page for details.")
5506                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5507                                                         "Continuing...")
5508                                 else:
5509                                         raise portage.exception.PackageNotFound(
5510                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5511                                 db_keys = list(portdb._aux_cache_keys)
5512                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5513                                 pkg = Package(type_name="ebuild", root_config=root_config,
5514                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5515                                 pkgsettings.setcpv(pkg)
5516                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5517                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5518                                 self._pkg_cache[pkg] = pkg
5519                                 args.append(PackageArg(arg=x, package=pkg,
5520                                         root_config=root_config))
5521                         elif x.startswith(os.path.sep):
5522                                 if not x.startswith(myroot):
5523                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5524                                                 " $ROOT.\n") % x, noiselevel=-1)
5525                                         return 0, []
5526                                 # Queue these up since it's most efficient to handle
5527                                 # multiple files in a single iter_owners() call.
5528                                 lookup_owners.append(x)
5529                         else:
5530                                 if x in ("system", "world"):
5531                                         x = SETPREFIX + x
5532                                 if x.startswith(SETPREFIX):
5533                                         s = x[len(SETPREFIX):]
5534                                         if s not in sets:
5535                                                 raise portage.exception.PackageSetNotFound(s)
5536                                         if s in self._sets:
5537                                                 continue
5538                                         # Recursively expand sets so that containment tests in
5539                                         # self._get_parent_sets() properly match atoms in nested
5540                                         # sets (like if world contains system).
5541                                         expanded_set = InternalPackageSet(
5542                                                 initial_atoms=getSetAtoms(s))
5543                                         self._sets[s] = expanded_set
5544                                         args.append(SetArg(arg=x, set=expanded_set,
5545                                                 root_config=root_config))
5546                                         continue
5547                                 if not is_valid_package_atom(x):
5548                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5549                                                 noiselevel=-1)
5550                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5551                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5552                                         return (0,[])
5553                                 # Don't expand categories or old-style virtuals here unless
5554                                 # necessary. Expansion of old-style virtuals here causes at
5555                                 # least the following problems:
5556                                 #   1) It's more difficult to determine which set(s) an atom
5557                                 #      came from, if any.
5558                                 #   2) It takes away freedom from the resolver to choose other
5559                                 #      possible expansions when necessary.
5560                                 if "/" in x:
5561                                         args.append(AtomArg(arg=x, atom=x,
5562                                                 root_config=root_config))
5563                                         continue
5564                                 expanded_atoms = self._dep_expand(root_config, x)
5565                                 installed_cp_set = set()
5566                                 for atom in expanded_atoms:
5567                                         atom_cp = portage.dep_getkey(atom)
5568                                         if vardb.cp_list(atom_cp):
5569                                                 installed_cp_set.add(atom_cp)
5570                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5571                                         installed_cp = iter(installed_cp_set).next()
5572                                         expanded_atoms = [atom for atom in expanded_atoms \
5573                                                 if portage.dep_getkey(atom) == installed_cp]
5574
5575                                 if len(expanded_atoms) > 1:
5576                                         print
5577                                         print
5578                                         ambiguous_package_name(x, expanded_atoms, root_config,
5579                                                 self.spinner, self.myopts)
5580                                         return False, myfavorites
5581                                 if expanded_atoms:
5582                                         atom = expanded_atoms[0]
5583                                 else:
5584                                         null_atom = insert_category_into_atom(x, "null")
5585                                         null_cp = portage.dep_getkey(null_atom)
5586                                         cat, atom_pn = portage.catsplit(null_cp)
5587                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5588                                         if virts_p:
5589                                                 # Allow the depgraph to choose which virtual.
5590                                                 atom = insert_category_into_atom(x, "virtual")
5591                                         else:
5592                                                 atom = insert_category_into_atom(x, "null")
5593
5594                                 args.append(AtomArg(arg=x, atom=atom,
5595                                         root_config=root_config))
5596
5597                 if lookup_owners:
5598                         relative_paths = []
5599                         search_for_multiple = False
5600                         if len(lookup_owners) > 1:
5601                                 search_for_multiple = True
5602
5603                         for x in lookup_owners:
5604                                 if not search_for_multiple and os.path.isdir(x):
5605                                         search_for_multiple = True
5606                                 relative_paths.append(x[len(myroot):])
5607
5608                         owners = set()
5609                         for pkg, relative_path in \
5610                                 real_vardb._owners.iter_owners(relative_paths):
5611                                 owners.add(pkg.mycpv)
5612                                 if not search_for_multiple:
5613                                         break
5614
5615                         if not owners:
5616                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5617                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5618                                 return 0, []
5619
5620                         for cpv in owners:
5621                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5622                                 if not slot:
5623                                         # portage now masks packages with missing slot, but it's
5624                                         # possible that one was installed by an older version
5625                                         atom = portage.cpv_getkey(cpv)
5626                                 else:
5627                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5628                                 args.append(AtomArg(arg=atom, atom=atom,
5629                                         root_config=root_config))
5630
5631                 if "--update" in self.myopts:
5632                         # In some cases, the greedy slots behavior can pull in a slot that
5633                         # the user would want to uninstall due to it being blocked by a
5634                         # newer version in a different slot. Therefore, it's necessary to
5635                         # detect and discard any that should be uninstalled. Each time
5636                         # that arguments are updated, package selections are repeated in
5637                         # order to ensure consistency with the current arguments:
5638                         #
5639                         #  1) Initialize args
5640                         #  2) Select packages and generate initial greedy atoms
5641                         #  3) Update args with greedy atoms
5642                         #  4) Select packages and generate greedy atoms again, while
5643                         #     accounting for any blockers between selected packages
5644                         #  5) Update args with revised greedy atoms
5645
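                             # Steps 1-3: set the initial args and expand them
                             # with greedy slot atoms.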
5646                         self._set_args(args)
5647                         greedy_args = []
5648                         for arg in args:
5649                                 greedy_args.append(arg)
5650                                 if not isinstance(arg, AtomArg):
5651                                         continue
5652                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5653                                         greedy_args.append(
5654                                                 AtomArg(arg=arg.arg, atom=atom,
5655                                                         root_config=arg.root_config))
5656
5657                         self._set_args(greedy_args)
5658                         del greedy_args
5659
5660                         # Revise greedy atoms, accounting for any blockers
5661                         # between selected packages.
5662                         revised_greedy_args = []
5663                         for arg in args:
5664                                 revised_greedy_args.append(arg)
5665                                 if not isinstance(arg, AtomArg):
5666                                         continue
5667                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5668                                         blocker_lookahead=True):
5669                                         revised_greedy_args.append(
5670                                                 AtomArg(arg=arg.arg, atom=atom,
5671                                                         root_config=arg.root_config))
5672                         args = revised_greedy_args
5673                         del revised_greedy_args
5674
5675                 self._set_args(args)
5676
5677                 myfavorites = set(myfavorites)
5678                 for arg in args:
5679                         if isinstance(arg, (AtomArg, PackageArg)):
5680                                 myfavorites.add(arg.atom)
5681                         elif isinstance(arg, SetArg):
5682                                 myfavorites.add(arg.arg)
5683                 myfavorites = list(myfavorites)
5684
5685                 pprovideddict = pkgsettings.pprovideddict
5686                 if debug:
5687                         portage.writemsg("\n", noiselevel=-1)
5688                 # Order needs to be preserved since a feature of --nodeps
5689                 # is to allow the user to force a specific merge order.
5690                 args.reverse()
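                     # Pop each argument and add a matching package to the graph
                     # as a root node, recording package.provided, missing and
                     # unsatisfied atoms along the way.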
5691                 while args:
5692                         arg = args.pop()
5693                         for atom in arg.set:
5694                                 self.spinner.update()
5695                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5696                                         root=myroot, parent=arg)
5697                                 atom_cp = portage.dep_getkey(atom)
5698                                 try:
5699                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5700                                         if pprovided and portage.match_from_list(atom, pprovided):
5701                                                 # A provided package has been specified on the command line.
5702                                                 self._pprovided_args.append((arg, atom))
5703                                                 continue
5704                                         if isinstance(arg, PackageArg):
5705                                                 if not self._add_pkg(arg.package, dep) or \
5706                                                         not self._create_graph():
5707                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5708                                                                 "dependencies for %s\n") % arg.arg)
5709                                                         return 0, myfavorites
5710                                                 continue
5711                                         if debug:
5712                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5713                                                         (arg, atom), noiselevel=-1)
5714                                         pkg, existing_node = self._select_package(
5715                                                 myroot, atom, onlydeps=onlydeps)
5716                                         if not pkg:
5717                                                 if not (isinstance(arg, SetArg) and \
5718                                                         arg.name in ("system", "world")):
5719                                                         self._unsatisfied_deps_for_display.append(
5720                                                                 ((myroot, atom), {}))
5721                                                         return 0, myfavorites
5722                                                 self._missing_args.append((arg, atom))
5723                                                 continue
5724                                         if atom_cp != pkg.cp:
5725                                                 # For old-style virtuals, we need to repeat the
5726                                                 # package.provided check against the selected package.
5727                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5728                                                 pprovided = pprovideddict.get(pkg.cp)
5729                                                 if pprovided and \
5730                                                         portage.match_from_list(expanded_atom, pprovided):
5731                                                         # A provided package has been
5732                                                         # specified on the command line.
5733                                                         self._pprovided_args.append((arg, atom))
5734                                                         continue
5735                                         if pkg.installed and "selective" not in self.myparams:
5736                                                 self._unsatisfied_deps_for_display.append(
5737                                                         ((myroot, atom), {}))
5738                                                 # Previous behavior was to bail out in this case, but
5739                                                 # since the dep is satisfied by the installed package,
5740                                                 # it's more friendly to continue building the graph
5741                                                 # and just show a warning message. Therefore, only bail
5742                                                 # out here if the atom is not from either the system or
5743                                                 # world set.
5744                                                 if not (isinstance(arg, SetArg) and \
5745                                                         arg.name in ("system", "world")):
5746                                                         return 0, myfavorites
5747
5748                                         # Add the selected package to the graph as soon as possible
5749                                         # so that later dep_check() calls can use it as feedback
5750                                         # for making more consistent atom selections.
5751                                         if not self._add_pkg(pkg, dep):
5752                                                 if isinstance(arg, SetArg):
5753                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5754                                                                 "dependencies for %s from %s\n") % \
5755                                                                 (atom, arg.arg))
5756                                                 else:
5757                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5758                                                                 "dependencies for %s\n") % atom)
5759                                                 return 0, myfavorites
5760
5761                                 except portage.exception.MissingSignature, e:
5762                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5763                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5764                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5765                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5766                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5767                                         return 0, myfavorites
5768                                 except portage.exception.InvalidSignature, e:
5769                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5770                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5771                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5772                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5773                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5774                                         return 0, myfavorites
5775                                 except SystemExit, e:
5776                                         raise # Needed else can't exit
5777                                 except Exception, e:
5778                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5779                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5780                                         raise
5781
5782                 # Now that the root packages have been added to the graph,
5783                 # process the dependencies.
5784                 if not self._create_graph():
5785                         return 0, myfavorites
5786
5787                 missing = 0
5788                 if "--usepkgonly" in self.myopts:
5789                         for xs in self.digraph.all_nodes():
5790                                 if not isinstance(xs, Package):
5791                                         continue
5792                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5793                                         if missing == 0:
5794                                                 print
5795                                         missing += 1
5796                                         print "Missing binary for:", xs[2]
5797
5798                 try:
5799                         self.altlist()
5800                 except self._unknown_internal_error:
5801                         return False, myfavorites
5802
5803                 # We're true here unless we are missing binaries.
5804                 return (not missing, myfavorites)
5805
5806         def _set_args(self, args):
5807                 """
5808                 Create the "args" package set from atoms and packages given as
5809                 arguments. This method can be called multiple times if necessary.
5810                 The package selection cache is automatically invalidated, since
5811                 arguments influence package selections.
5812                 """
5813                 args_set = self._sets["args"]
5814                 args_set.clear()
5815                 for arg in args:
5816                         if not isinstance(arg, (AtomArg, PackageArg)):
5817                                 continue
5818                         atom = arg.atom
5819                         if atom in args_set:
5820                                 continue
5821                         args_set.add(atom)
5822
5823                 self._set_atoms.clear()
5824                 self._set_atoms.update(chain(*self._sets.itervalues()))
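                     # Rebuild the (atom, root) -> argument map so that atoms can
                     # be traced back to the arguments that contain them.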
5825                 atom_arg_map = self._atom_arg_map
5826                 atom_arg_map.clear()
5827                 for arg in args:
5828                         for atom in arg.set:
5829                                 atom_key = (atom, arg.root_config.root)
5830                                 refs = atom_arg_map.get(atom_key)
5831                                 if refs is None:
5832                                         refs = []
5833                                         atom_arg_map[atom_key] = refs
5834                                 if arg not in refs:
5835                                         refs.append(arg)
5836
5837                 # Invalidate the package selection cache, since
5838                 # arguments influence package selections.
5839                 self._highest_pkg_cache.clear()
5840                 for trees in self._filtered_trees.itervalues():
5841                         trees["porttree"].dbapi._clear_cache()
5842
5843         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5844                 """
5845                 Return a list of slot atoms corresponding to installed slots that
5846                 differ from the slot of the highest visible match. When
5847                 blocker_lookahead is True, slot atoms that would trigger a blocker
5848                 conflict are automatically discarded, potentially allowing automatic
5849                 uninstallation of older slots when appropriate.
5850                 """
5851                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5852                 if highest_pkg is None:
5853                         return []
5854                 vardb = root_config.trees["vartree"].dbapi
5855                 slots = set()
5856                 for cpv in vardb.match(atom):
5857                         # don't mix new virtuals with old virtuals
5858                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5859                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5860
5861                 slots.add(highest_pkg.metadata["SLOT"])
5862                 if len(slots) == 1:
5863                         return []
5864                 greedy_pkgs = []
5865                 slots.remove(highest_pkg.metadata["SLOT"])
5866                 while slots:
5867                         slot = slots.pop()
5868                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5869                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5870                         if pkg is not None and \
5871                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5872                                 greedy_pkgs.append(pkg)
5873                 if not greedy_pkgs:
5874                         return []
5875                 if not blocker_lookahead:
5876                         return [pkg.slot_atom for pkg in greedy_pkgs]
5877
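                     # With blocker lookahead, collect the blocker atoms declared
                     # by each greedy candidate and by the highest visible package.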
5878                 blockers = {}
5879                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5880                 for pkg in greedy_pkgs + [highest_pkg]:
5881                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5882                         try:
5883                                 atoms = self._select_atoms(
5884                                         pkg.root, dep_str, pkg.use.enabled,
5885                                         parent=pkg, strict=True)
5886                         except portage.exception.InvalidDependString:
5887                                 continue
5888                         blocker_atoms = (x for x in atoms if x.blocker)
5889                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5890
5891                 if highest_pkg not in blockers:
5892                         return []
5893
5894                 # filter packages with invalid deps
5895                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5896
5897                 # filter packages that conflict with highest_pkg
5898                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5899                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5900                         blockers[pkg].findAtomForPackage(highest_pkg))]
5901
5902                 if not greedy_pkgs:
5903                         return []
5904
5905                 # If two packages conflict, discard the lower version.
5906                 discard_pkgs = set()
5907                 greedy_pkgs.sort(reverse=True)
5908                 for i in xrange(len(greedy_pkgs) - 1):
5909                         pkg1 = greedy_pkgs[i]
5910                         if pkg1 in discard_pkgs:
5911                                 continue
5912                         for j in xrange(i + 1, len(greedy_pkgs)):
5913                                 pkg2 = greedy_pkgs[j]
5914                                 if pkg2 in discard_pkgs:
5915                                         continue
5916                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5917                                         blockers[pkg2].findAtomForPackage(pkg1):
5918                                         # pkg1 > pkg2
5919                                         discard_pkgs.add(pkg2)
5920
5921                 return [pkg.slot_atom for pkg in greedy_pkgs \
5922                         if pkg not in discard_pkgs]
5923
5924         def _select_atoms_from_graph(self, *pargs, **kwargs):
5925                 """
5926                 Prefer atoms matching packages that have already been
5927                 added to the graph or those that are installed and have
5928                 not been scheduled for replacement.
5929                 """
5930                 kwargs["trees"] = self._graph_trees
5931                 return self._select_atoms_highest_available(*pargs, **kwargs)
5932
5933         def _select_atoms_highest_available(self, root, depstring,
5934                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5935                 """This will raise InvalidDependString if necessary. If trees is
5936                 None then self._filtered_trees is used."""
5937                 pkgsettings = self.pkgsettings[root]
5938                 if trees is None:
5939                         trees = self._filtered_trees
5940                 if not getattr(priority, "buildtime", False):
5941                         # The parent should only be passed to dep_check() for buildtime
5942                         # dependencies since that's the only case when it's appropriate
5943                         # to trigger the circular dependency avoidance code which uses it.
5944                         # It's important not to trigger the same circular dependency
5945                         # avoidance code for runtime dependencies since it's not needed
5946                         # and it can promote an incorrect package choice.
5947                         parent = None
5949                 try:
5950                         if parent is not None:
5951                                 trees[root]["parent"] = parent
5952                         if not strict:
5953                                 portage.dep._dep_check_strict = False
5954                         mycheck = portage.dep_check(depstring, None,
5955                                 pkgsettings, myuse=myuse,
5956                                 myroot=root, trees=trees)
5957                 finally:
5958                         if parent is not None:
5959                                 trees[root].pop("parent")
5960                         portage.dep._dep_check_strict = True
5961                 if not mycheck[0]:
5962                         raise portage.exception.InvalidDependString(mycheck[1])
5963                 selected_atoms = mycheck[1]
5964                 return selected_atoms
5965
5966         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5967                 atom = portage.dep.Atom(atom)
5968                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5969                 atom_without_use = atom
5970                 if atom.use:
5971                         atom_without_use = portage.dep.remove_slot(atom)
5972                         if atom.slot:
5973                                 atom_without_use += ":" + atom.slot
5974                         atom_without_use = portage.dep.Atom(atom_without_use)
5975                 xinfo = '"%s"' % atom
5976                 if arg:
5977                         xinfo = '"%s"' % arg
5978                 # Discard null/ from failed cpv_expand category expansion.
5979                 xinfo = xinfo.replace("null/", "")
5980                 masked_packages = []
5981                 missing_use = []
5982                 masked_pkg_instances = set()
5983                 missing_licenses = []
5984                 have_eapi_mask = False
5985                 pkgsettings = self.pkgsettings[root]
5986                 implicit_iuse = pkgsettings._get_implicit_iuse()
5987                 root_config = self.roots[root]
5988                 portdb = self.roots[root].trees["porttree"].dbapi
5989                 dbs = self._filtered_trees[root]["dbs"]
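                     # Gather masked candidates and candidates with unsatisfied
                     # USE dependencies from each non-installed database.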
5990                 for db, pkg_type, built, installed, db_keys in dbs:
5991                         if installed:
5992                                 continue
5993                         match = db.match
5994                         if hasattr(db, "xmatch"):
5995                                 cpv_list = db.xmatch("match-all", atom_without_use)
5996                         else:
5997                                 cpv_list = db.match(atom_without_use)
5998                         # descending order
5999                         cpv_list.reverse()
6000                         for cpv in cpv_list:
6001                                 metadata, mreasons = get_mask_info(root_config, cpv,
6002                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6003                                 if metadata is not None:
6004                                         pkg = Package(built=built, cpv=cpv,
6005                                                 installed=installed, metadata=metadata,
6006                                                 root_config=root_config)
6007                                         if pkg.cp != atom.cp:
6008                                                 # A cpv can be returned from dbapi.match() as an
6009                                                 # old-style virtual match even in cases when the
6010                                                 # package does not actually PROVIDE the virtual.
6011                                                 # Filter out any such false matches here.
6012                                                 if not atom_set.findAtomForPackage(pkg):
6013                                                         continue
6014                                         if mreasons:
6015                                                 masked_pkg_instances.add(pkg)
6016                                         if atom.use:
6017                                                 missing_use.append(pkg)
6018                                                 if not mreasons:
6019                                                         continue
6020                                 masked_packages.append(
6021                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6022
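                     # For each candidate with USE deps, determine whether the
                     # required flags are missing from IUSE or merely need to be
                     # toggled in the configuration.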
6023                 missing_use_reasons = []
6024                 missing_iuse_reasons = []
6025                 for pkg in missing_use:
6026                         use = pkg.use.enabled
6027                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6028                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6029                         missing_iuse = []
6030                         for x in atom.use.required:
6031                                 if iuse_re.match(x) is None:
6032                                         missing_iuse.append(x)
6033                         mreasons = []
6034                         if missing_iuse:
6035                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6036                                 missing_iuse_reasons.append((pkg, mreasons))
6037                         else:
6038                                 need_enable = sorted(atom.use.enabled.difference(use))
6039                                 need_disable = sorted(atom.use.disabled.intersection(use))
6040                                 if need_enable or need_disable:
6041                                         changes = []
6042                                         changes.extend(colorize("red", "+" + x) \
6043                                                 for x in need_enable)
6044                                         changes.extend(colorize("blue", "-" + x) \
6045                                                 for x in need_disable)
6046                                         mreasons.append("Change USE: %s" % " ".join(changes))
6047                                         missing_use_reasons.append((pkg, mreasons))
6048
6049                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6050                         in missing_use_reasons if pkg not in masked_pkg_instances]
6051
6052                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6053                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6054
6055                 show_missing_use = False
6056                 if unmasked_use_reasons:
6057                         # Only show the latest version.
6058                         show_missing_use = unmasked_use_reasons[:1]
6059                 elif unmasked_iuse_reasons:
6060                         if missing_use_reasons:
6061                                 # All packages with required IUSE are masked,
6062                                 # so display a normal masking message.
6063                                 pass
6064                         else:
6065                                 show_missing_use = unmasked_iuse_reasons
6066
6067                 if show_missing_use:
6068                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6069                         print "!!! One of the following packages is required to complete your request:"
6070                         for pkg, mreasons in show_missing_use:
6071                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6072
6073                 elif masked_packages:
6074                         print "\n!!! " + \
6075                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6076                                 colorize("INFORM", xinfo) + \
6077                                 colorize("BAD", " have been masked.")
6078                         print "!!! One of the following masked packages is required to complete your request:"
6079                         have_eapi_mask = show_masked_packages(masked_packages)
6080                         if have_eapi_mask:
6081                                 print
6082                                 msg = ("The current version of portage supports " + \
6083                                         "EAPI '%s'. You must upgrade to a newer version" + \
6084                                         " of portage before EAPI masked packages can" + \
6085                                         " be installed.") % portage.const.EAPI
6086                                 from textwrap import wrap
6087                                 for line in wrap(msg, 75):
6088                                         print line
6089                         print
6090                         show_mask_docs()
6091                 else:
6092                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6093
6094                 # Show parent nodes and the argument that pulled them in.
6095                 traversed_nodes = set()
6096                 node = myparent
6097                 msg = []
6098                 while node is not None:
6099                         traversed_nodes.add(node)
6100                         msg.append('(dependency required by "%s" [%s])' % \
6101                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6102                         # When traversing to parents, prefer arguments over packages
6103                         # since arguments are root nodes. Never traverse the same
6104                         # package twice, in order to prevent an infinite loop.
6105                         selected_parent = None
6106                         for parent in self.digraph.parent_nodes(node):
6107                                 if isinstance(parent, DependencyArg):
6108                                         msg.append('(dependency required by "%s" [argument])' % \
6109                                                 (colorize('INFORM', str(parent))))
6110                                         selected_parent = None
6111                                         break
6112                                 if parent not in traversed_nodes:
6113                                         selected_parent = parent
6114                         node = selected_parent
6115                 for line in msg:
6116                         print line
6117
6118                 print
6119
6120         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6121                 cache_key = (root, atom, onlydeps)
6122                 ret = self._highest_pkg_cache.get(cache_key)
6123                 if ret is not None:
6124                         pkg, existing = ret
6125                         if pkg and not existing:
6126                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6127                                 if existing and existing == pkg:
6128                                         # Update the cache to reflect that the
6129                                         # package has been added to the graph.
6130                                         ret = pkg, pkg
6131                                         self._highest_pkg_cache[cache_key] = ret
6132                         return ret
6133                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6134                 self._highest_pkg_cache[cache_key] = ret
6135                 pkg, existing = ret
6136                 if pkg is not None:
6137                         settings = pkg.root_config.settings
6138                         if visible(settings, pkg) and not (pkg.installed and \
6139                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6140                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6141                 return ret
6142
6143         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6144                 root_config = self.roots[root]
6145                 pkgsettings = self.pkgsettings[root]
6146                 dbs = self._filtered_trees[root]["dbs"]
6147                 vardb = self.roots[root].trees["vartree"].dbapi
6148                 portdb = self.roots[root].trees["porttree"].dbapi
6149                 # List of acceptable packages, ordered by type preference.
6150                 matched_packages = []
6151                 highest_version = None
6152                 if not isinstance(atom, portage.dep.Atom):
6153                         atom = portage.dep.Atom(atom)
6154                 atom_cp = atom.cp
6155                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6156                 existing_node = None
6157                 myeb = None
6158                 usepkgonly = "--usepkgonly" in self.myopts
6159                 empty = "empty" in self.myparams
6160                 selective = "selective" in self.myparams
6161                 reinstall = False
6162                 noreplace = "--noreplace" in self.myopts
6163                 # Behavior of the "selective" parameter depends on
6164                 # whether or not a package matches an argument atom.
6165                 # If an installed package provides an old-style
6166                 # virtual that is no longer provided by an available
6167                 # package, the installed package may match an argument
6168                 # atom even though none of the available packages do.
6169                 # Therefore, "selective" logic does not consider
6170                 # whether or not an installed package matches an
6171                 # argument atom. It only considers whether or not
6172                 # available packages match argument atoms, which is
6173                 # represented by the found_available_arg flag.
6174                 found_available_arg = False
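                     # Two passes: the first prefers a match that already has a
                     # node in the graph, and the second falls back to a normal
                     # database search.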
6175                 for find_existing_node in True, False:
6176                         if existing_node:
6177                                 break
6178                         for db, pkg_type, built, installed, db_keys in dbs:
6179                                 if existing_node:
6180                                         break
6181                                 if installed and not find_existing_node:
6182                                         want_reinstall = reinstall or empty or \
6183                                                 (found_available_arg and not selective)
6184                                         if want_reinstall and matched_packages:
6185                                                 continue
6186                                 if hasattr(db, "xmatch"):
6187                                         cpv_list = db.xmatch("match-all", atom)
6188                                 else:
6189                                         cpv_list = db.match(atom)
6190
6191                                 # USE=multislot can make an installed package appear as if
6192                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6193                                 # won't do any good as long as USE=multislot is enabled since
6194                                 # the newly built package still won't have the expected slot.
6195                                 # Therefore, assume that such SLOT dependencies are already
6196                                 # satisfied rather than forcing a rebuild.
6197                                 if installed and not cpv_list and atom.slot:
6198                                         for cpv in db.match(atom.cp):
6199                                                 slot_available = False
6200                                                 for other_db, other_type, other_built, \
6201                                                         other_installed, other_keys in dbs:
6202                                                         try:
6203                                                                 if atom.slot == \
6204                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6205                                                                         slot_available = True
6206                                                                         break
6207                                                         except KeyError:
6208                                                                 pass
6209                                                 if not slot_available:
6210                                                         continue
6211                                                 inst_pkg = self._pkg(cpv, "installed",
6212                                                         root_config, installed=installed)
6213                                                 # Remove the slot from the atom and verify that
6214                                                 # the package matches the resulting atom.
6215                                                 atom_without_slot = portage.dep.remove_slot(atom)
6216                                                 if atom.use:
6217                                                         atom_without_slot += str(atom.use)
6218                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6219                                                 if portage.match_from_list(
6220                                                         atom_without_slot, [inst_pkg]):
6221                                                         cpv_list = [inst_pkg.cpv]
6222                                                 break
6223
6224                                 if not cpv_list:
6225                                         continue
6226                                 pkg_status = "merge"
6227                                 if installed or onlydeps:
6228                                         pkg_status = "nomerge"
6229                                 # descending order
6230                                 cpv_list.reverse()
6231                                 for cpv in cpv_list:
6232                                         # Make --noreplace take precedence over --newuse.
6233                                         if not installed and noreplace and \
6234                                                 cpv in vardb.match(atom):
6235                                                 # If the installed version is masked, it may
6236                                                 # be necessary to look at lower versions,
6237                                                 # in case there is a visible downgrade.
6238                                                 continue
6239                                         reinstall_for_flags = None
6240                                         cache_key = (pkg_type, root, cpv, pkg_status)
6241                                         calculated_use = True
6242                                         pkg = self._pkg_cache.get(cache_key)
6243                                         if pkg is None:
6244                                                 calculated_use = False
6245                                                 try:
6246                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6247                                                 except KeyError:
6248                                                         continue
6249                                                 pkg = Package(built=built, cpv=cpv,
6250                                                         installed=installed, metadata=metadata,
6251                                                         onlydeps=onlydeps, root_config=root_config,
6252                                                         type_name=pkg_type)
6253                                                 metadata = pkg.metadata
6254                                                 if not built:
6255                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6256                                                 if not built and ("?" in metadata["LICENSE"] or \
6257                                                         "?" in metadata["PROVIDE"]):
6258                                                         # This is avoided whenever possible because
6259                                                         # it's expensive. It only needs to be done here
6260                                                         # if it has an effect on visibility.
6261                                                         pkgsettings.setcpv(pkg)
6262                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6263                                                         calculated_use = True
6264                                                 self._pkg_cache[pkg] = pkg
6265
6266                                         if not installed or (built and matched_packages):
6267                                                 # Only enforce visibility on installed packages
6268                                                 # if there is at least one other visible package
6269                                                 # available. By filtering installed masked packages
6270                                                 # here, packages that have been masked since they
6271                                                 # were installed can be automatically downgraded
6272                                                 # to an unmasked version.
6273                                                 try:
6274                                                         if not visible(pkgsettings, pkg):
6275                                                                 continue
6276                                                 except portage.exception.InvalidDependString:
6277                                                         if not installed:
6278                                                                 continue
6279
6280                                                 # Enable upgrade or downgrade to a version
6281                                                 # with visible KEYWORDS when the installed
6282                                                 # version is masked by KEYWORDS, but never
6283                                                 # reinstall the same exact version only due
6284                                                 # to a KEYWORDS mask.
6285                                                 if built and matched_packages:
6286
6287                                                         different_version = None
6288                                                         for avail_pkg in matched_packages:
6289                                                                 if not portage.dep.cpvequal(
6290                                                                         pkg.cpv, avail_pkg.cpv):
6291                                                                         different_version = avail_pkg
6292                                                                         break
6293                                                         if different_version is not None:
6294
6295                                                                 if installed and \
6296                                                                         pkgsettings._getMissingKeywords(
6297                                                                         pkg.cpv, pkg.metadata):
6298                                                                         continue
6299
6300                                                                 # If the ebuild no longer exists or its
6301                                                                 # keywords have been dropped, reject built
6302                                                                 # instances (installed or binary).
6303                                                                 # If --usepkgonly is enabled, assume that
6304                                                                 # the ebuild status should be ignored.
6305                                                                 if not usepkgonly:
6306                                                                         try:
6307                                                                                 pkg_eb = self._pkg(
6308                                                                                         pkg.cpv, "ebuild", root_config)
6309                                                                         except portage.exception.PackageNotFound:
6310                                                                                 continue
6311                                                                         else:
6312                                                                                 if not visible(pkgsettings, pkg_eb):
6313                                                                                         continue
6314
6315                                         if not pkg.built and not calculated_use:
6316                                                 # This is avoided whenever possible because
6317                                                 # it's expensive.
6318                                                 pkgsettings.setcpv(pkg)
6319                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6320
6321                                         if pkg.cp != atom.cp:
6322                                                 # A cpv can be returned from dbapi.match() as an
6323                                                 # old-style virtual match even in cases when the
6324                                                 # package does not actually PROVIDE the virtual.
6325                                                 # Filter out any such false matches here.
6326                                                 if not atom_set.findAtomForPackage(pkg):
6327                                                         continue
6328
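                                             # Check whether this package matches one of the
                                             # argument atoms (only relevant for the target root).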
6329                                         myarg = None
6330                                         if root == self.target_root:
6331                                                 try:
6332                                                         # Ebuild USE must have been calculated prior
6333                                                         # to this point, in case atoms have USE deps.
6334                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6335                                                 except StopIteration:
6336                                                         pass
6337                                                 except portage.exception.InvalidDependString:
6338                                                         if not installed:
6339                                                                 # masked by corruption
6340                                                                 continue
6341                                         if not installed and myarg:
6342                                                 found_available_arg = True
6343
6344                                         if atom.use and not pkg.built:
6345                                                 use = pkg.use.enabled
6346                                                 if atom.use.enabled.difference(use):
6347                                                         continue
6348                                                 if atom.use.disabled.intersection(use):
6349                                                         continue
6350                                         if pkg.cp == atom_cp:
6351                                                 if highest_version is None:
6352                                                         highest_version = pkg
6353                                                 elif pkg > highest_version:
6354                                                         highest_version = pkg
6355                                         # At this point, we've found the highest visible
6356                                         # match from the current repo. Any lower versions
6357                                         # from this repo are ignored, so the loop
6358                                         # will always end with a break statement below
6359                                         # this point.
6360                                         if find_existing_node:
6361                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6362                                                 if not e_pkg:
6363                                                         break
6364                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6365                                                         if highest_version and \
6366                                                                 e_pkg.cp == atom_cp and \
6367                                                                 e_pkg < highest_version and \
6368                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6369                                                                 # There is a higher version available in a
6370                                                                 # different slot, so this existing node is
6371                                                                 # irrelevant.
6372                                                                 pass
6373                                                         else:
6374                                                                 matched_packages.append(e_pkg)
6375                                                                 existing_node = e_pkg
6376                                                 break
6377                                         # Compare built package to current config and
6378                                         # reject the built package if necessary.
6379                                         if built and not installed and \
6380                                                 ("--newuse" in self.myopts or \
6381                                                 "--reinstall" in self.myopts):
6382                                                 iuses = pkg.iuse.all
6383                                                 old_use = pkg.use.enabled
6384                                                 if myeb:
6385                                                         pkgsettings.setcpv(myeb)
6386                                                 else:
6387                                                         pkgsettings.setcpv(pkg)
6388                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6389                                                 forced_flags = set()
6390                                                 forced_flags.update(pkgsettings.useforce)
6391                                                 forced_flags.update(pkgsettings.usemask)
6392                                                 cur_iuse = iuses
6393                                                 if myeb and not usepkgonly:
6394                                                         cur_iuse = myeb.iuse.all
6395                                                 if self._reinstall_for_flags(forced_flags,
6396                                                         old_use, iuses,
6397                                                         now_use, cur_iuse):
6398                                                         break
6399                                         # Compare current config to installed package
6400                                         # and do not reinstall if possible.
6401                                         if not installed and \
6402                                                 ("--newuse" in self.myopts or \
6403                                                 "--reinstall" in self.myopts) and \
6404                                                 cpv in vardb.match(atom):
6405                                                 pkgsettings.setcpv(pkg)
6406                                                 forced_flags = set()
6407                                                 forced_flags.update(pkgsettings.useforce)
6408                                                 forced_flags.update(pkgsettings.usemask)
6409                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6410                                                 old_iuse = set(filter_iuse_defaults(
6411                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6412                                                 cur_use = pkg.use.enabled
6413                                                 cur_iuse = pkg.iuse.all
6414                                                 reinstall_for_flags = \
6415                                                         self._reinstall_for_flags(
6416                                                         forced_flags, old_use, old_iuse,
6417                                                         cur_use, cur_iuse)
6418                                                 if reinstall_for_flags:
6419                                                         reinstall = True
6420                                         if not built:
6421                                                 myeb = pkg
6422                                         matched_packages.append(pkg)
6423                                         if reinstall_for_flags:
6424                                                 self._reinstall_nodes[pkg] = \
6425                                                         reinstall_for_flags
6426                                         break
6427
6428                 if not matched_packages:
6429                         return None, None
6430
6431                 if "--debug" in self.myopts:
6432                         for pkg in matched_packages:
6433                                 portage.writemsg("%s %s\n" % \
6434                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6435
6436                 # Filter out any old-style virtual matches if they are
6437                 # mixed with new-style virtual matches.
6438                 cp = portage.dep_getkey(atom)
6439                 if len(matched_packages) > 1 and \
6440                         "virtual" == portage.catsplit(cp)[0]:
6441                         for pkg in matched_packages:
6442                                 if pkg.cp != cp:
6443                                         continue
6444                                 # Got a new-style virtual, so filter
6445                                 # out any old-style virtuals.
6446                                 matched_packages = [pkg for pkg in matched_packages \
6447                                         if pkg.cp == cp]
6448                                 break
6449
6450                 if len(matched_packages) > 1:
6451                         bestmatch = portage.best(
6452                                 [pkg.cpv for pkg in matched_packages])
6453                         matched_packages = [pkg for pkg in matched_packages \
6454                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6455
6456                 # ordered by type preference ("ebuild" type is the last resort)
6457                 return  matched_packages[-1], existing_node
6458
6459         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6460                 """
6461                 Select packages that have already been added to the graph or
6462                 those that are installed and have not been scheduled for
6463                 replacement.
6464                 """
6465                 graph_db = self._graph_trees[root]["porttree"].dbapi
6466                 matches = graph_db.match_pkgs(atom)
6467                 if not matches:
6468                         return None, None
6469                 pkg = matches[-1] # highest match
6470                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6471                 return pkg, in_graph
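		# Hedged editorial sketch (not part of the original module): when
		# --complete-graph swaps this method in for self._select_package,
		# a lookup amounts to roughly the following (names illustrative):
		#
		#     pkg, in_graph = self._select_pkg_from_graph(root, atom)
		#     if pkg is None:
		#         ...  # neither the graph nor installed packages satisfy atom
		#     elif in_graph is None:
		#         ...  # pkg is installed but its slot is not in the graph yet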
6472
6473         def _complete_graph(self):
6474                 """
6475                 Add any deep dependencies of required sets (args, system, world) that
6476                 have not been pulled into the graph yet. This ensures that the graph
6477                 is consistent such that initially satisfied deep dependencies are not
6478                 broken in the new graph. Initially unsatisfied dependencies are
6479                 irrelevant since we only want to avoid breaking dependencies that are
6480                 initially satisfied.
6481
6482                 Since this method can consume enough time to disturb users, it is
6483                 currently only enabled by the --complete-graph option.
6484                 """
6485                 if "--buildpkgonly" in self.myopts or \
6486                         "recurse" not in self.myparams:
6487                         return 1
6488
6489                 if "complete" not in self.myparams:
6490                         # Skip this to avoid consuming enough time to disturb users.
6491                         return 1
6492
6493                 # Put the depgraph into a mode that causes it to only
6494                 # select packages that have already been added to the
6495                 # graph or those that are installed and have not been
6496                 # scheduled for replacement. Also, toggle the "deep"
6497                 # parameter so that all dependencies are traversed and
6498                 # accounted for.
6499                 self._select_atoms = self._select_atoms_from_graph
6500                 self._select_package = self._select_pkg_from_graph
6501                 already_deep = "deep" in self.myparams
6502                 if not already_deep:
6503                         self.myparams.add("deep")
6504
6505                 for root in self.roots:
6506                         required_set_names = self._required_set_names.copy()
6507                         if root == self.target_root and \
6508                                 (already_deep or "empty" in self.myparams):
6509                                 required_set_names.difference_update(self._sets)
6510                         if not required_set_names and not self._ignored_deps:
6511                                 continue
6512                         root_config = self.roots[root]
6513                         setconfig = root_config.setconfig
6514                         args = []
6515                         # Reuse existing SetArg instances when available.
6516                         for arg in self.digraph.root_nodes():
6517                                 if not isinstance(arg, SetArg):
6518                                         continue
6519                                 if arg.root_config != root_config:
6520                                         continue
6521                                 if arg.name in required_set_names:
6522                                         args.append(arg)
6523                                         required_set_names.remove(arg.name)
6524                         # Create new SetArg instances only when necessary.
6525                         for s in required_set_names:
6526                                 expanded_set = InternalPackageSet(
6527                                         initial_atoms=setconfig.getSetAtoms(s))
6528                                 atom = SETPREFIX + s
6529                                 args.append(SetArg(arg=atom, set=expanded_set,
6530                                         root_config=root_config))
6531                         vardb = root_config.trees["vartree"].dbapi
6532                         for arg in args:
6533                                 for atom in arg.set:
6534                                         self._dep_stack.append(
6535                                                 Dependency(atom=atom, root=root, parent=arg))
6536                         if self._ignored_deps:
6537                                 self._dep_stack.extend(self._ignored_deps)
6538                                 self._ignored_deps = []
6539                         if not self._create_graph(allow_unsatisfied=True):
6540                                 return 0
6541                         # Check the unsatisfied deps to see if any initially satisfied deps
6542                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6543                         # deps are irrelevant since we only want to avoid breaking deps
6544                         # that are initially satisfied.
6545                         while self._unsatisfied_deps:
6546                                 dep = self._unsatisfied_deps.pop()
6547                                 matches = vardb.match_pkgs(dep.atom)
6548                                 if not matches:
6549                                         self._initially_unsatisfied_deps.append(dep)
6550                                         continue
6551                                 # A scheduled installation broke a deep dependency.
6552                                 # Add the installed package to the graph so that it
6553                                 # will be appropriately reported as a slot collision
6554                                 # (possibly solvable via backtracking).
6555                                 pkg = matches[-1] # highest match
6556                                 if not self._add_pkg(pkg, dep):
6557                                         return 0
6558                                 if not self._create_graph(allow_unsatisfied=True):
6559                                         return 0
6560                 return 1
6561
6562         def _pkg(self, cpv, type_name, root_config, installed=False):
6563                 """
6564                 Get a package instance from the cache, or create a new
6565                 one if necessary. Raises PackageNotFound if aux_get
6566                 fails for some reason (the package does not exist or is
6567                 corrupt).
6568                 """
6569                 operation = "merge"
6570                 if installed:
6571                         operation = "nomerge"
6572                 pkg = self._pkg_cache.get(
6573                         (type_name, root_config.root, cpv, operation))
6574                 if pkg is None:
6575                         tree_type = self.pkg_tree_map[type_name]
6576                         db = root_config.trees[tree_type].dbapi
6577                         db_keys = list(self._trees_orig[root_config.root][
6578                                 tree_type].dbapi._aux_cache_keys)
6579                         try:
6580                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6581                         except KeyError:
6582                                 raise portage.exception.PackageNotFound(cpv)
6583                         pkg = Package(cpv=cpv, metadata=metadata,
6584                                 root_config=root_config, installed=installed)
6585                         if type_name == "ebuild":
6586                                 settings = self.pkgsettings[root_config.root]
6587                                 settings.setcpv(pkg)
6588                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6589                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6590                         self._pkg_cache[pkg] = pkg
6591                 return pkg
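		# Hedged editorial note: the cache key built above is the tuple
		# (type_name, root, cpv, operation), so an installed package is looked
		# up later in this file as, e.g.,
		#
		#     self._pkg_cache[("installed", task.root, task.cpv, "nomerge")]
		#
		# since installed packages always carry the "nomerge" operation here.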
6592
6593         def validate_blockers(self):
6594                 """Remove any blockers from the digraph that do not match any of the
6595                 packages within the graph.  If necessary, create hard deps to ensure
6596                 correct merge order such that mutually blocking packages are never
6597                 installed simultaneously."""
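		# Hedged editorial note: the blocker atoms handled below are the
		# dependency atoms beginning with "!" (see the startswith("!") filter
		# further down); under EAPI 2 and later a "!!" prefix marks a hard
		# block whose overlap is forbidden, which is why
		# blocker.atom.blocker.overlap.forbid is consulted when uninstall
		# tasks are scheduled in _serialize_tasks(). Example atoms (purely
		# illustrative): "!app-misc/foo" (soft) vs. "!!app-misc/foo" (hard).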
6598
6599                 if "--buildpkgonly" in self.myopts or \
6600                         "--nodeps" in self.myopts:
6601                         return True
6602
6603                 #if "deep" in self.myparams:
6604                 if True:
6605                                 # Pull in blockers from all installed packages that haven't already
6606                                 # been pulled into the depgraph.  Note that the "if True:" above makes
6607                                 # this run unconditionally, despite the performance penalty incurred
6608                                 # by all the additional dep_check calls that are required.
6609
6610                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6611                         for myroot in self.trees:
6612                                 vardb = self.trees[myroot]["vartree"].dbapi
6613                                 portdb = self.trees[myroot]["porttree"].dbapi
6614                                 pkgsettings = self.pkgsettings[myroot]
6615                                 final_db = self.mydbapi[myroot]
6616
6617                                 blocker_cache = BlockerCache(myroot, vardb)
6618                                 stale_cache = set(blocker_cache)
6619                                 for pkg in vardb:
6620                                         cpv = pkg.cpv
6621                                         stale_cache.discard(cpv)
6622                                         pkg_in_graph = self.digraph.contains(pkg)
6623
6624                                         # Check for masked installed packages. Only warn about
6625                                         # packages that are in the graph in order to avoid warning
6626                                         # about those that will be automatically uninstalled during
6627                                         # the merge process or by --depclean.
6628                                         if pkg in final_db:
6629                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6630                                                         self._masked_installed.add(pkg)
6631
6632                                         blocker_atoms = None
6633                                         blockers = None
6634                                         if pkg_in_graph:
6635                                                 blockers = []
6636                                                 try:
6637                                                         blockers.extend(
6638                                                                 self._blocker_parents.child_nodes(pkg))
6639                                                 except KeyError:
6640                                                         pass
6641                                                 try:
6642                                                         blockers.extend(
6643                                                                 self._irrelevant_blockers.child_nodes(pkg))
6644                                                 except KeyError:
6645                                                         pass
6646                                         if blockers is not None:
6647                                                 blockers = set(str(blocker.atom) \
6648                                                         for blocker in blockers)
6649
6650                                         # If this node has any blockers, create a "nomerge"
6651                                         # node for it so that they can be enforced.
6652                                         self.spinner.update()
6653                                         blocker_data = blocker_cache.get(cpv)
6654                                         if blocker_data is not None and \
6655                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6656                                                 blocker_data = None
6657
6658                                         # If blocker data from the graph is available, use
6659                                         # it to validate the cache and update the cache if
6660                                         # it seems invalid.
6661                                         if blocker_data is not None and \
6662                                                 blockers is not None:
6663                                                 if not blockers.symmetric_difference(
6664                                                         blocker_data.atoms):
6665                                                         continue
6666                                                 blocker_data = None
6667
6668                                         if blocker_data is None and \
6669                                                 blockers is not None:
6670                                                 # Re-use the blockers from the graph.
6671                                                 blocker_atoms = sorted(blockers)
6672                                                 counter = long(pkg.metadata["COUNTER"])
6673                                                 blocker_data = \
6674                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6675                                                 blocker_cache[pkg.cpv] = blocker_data
6676                                                 continue
6677
6678                                         if blocker_data:
6679                                                 blocker_atoms = blocker_data.atoms
6680                                         else:
6681                                                 # Use aux_get() to trigger FakeVartree global
6682                                                 # updates on *DEPEND when appropriate.
6683                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6684                                                 # It is crucial to pass in final_db here in order to
6685                                                 # optimize dep_check calls by eliminating atoms via
6686                                                 # dep_wordreduce and dep_eval calls.
6687                                                 try:
6688                                                         portage.dep._dep_check_strict = False
6689                                                         try:
6690                                                                 success, atoms = portage.dep_check(depstr,
6691                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6692                                                                         trees=self._graph_trees, myroot=myroot)
6693                                                         except Exception, e:
6694                                                                 if isinstance(e, SystemExit):
6695                                                                         raise
6696                                                                 # This is helpful, for example, if a ValueError
6697                                                                 # is thrown from cpv_expand due to multiple
6698                                                                 # matches (this can happen if an atom lacks a
6699                                                                 # category).
6700                                                                 show_invalid_depstring_notice(
6701                                                                         pkg, depstr, str(e))
6702                                                                 del e
6703                                                                 raise
6704                                                 finally:
6705                                                         portage.dep._dep_check_strict = True
6706                                                 if not success:
6707                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6708                                                         if replacement_pkg and \
6709                                                                 replacement_pkg[0].operation == "merge":
6710                                                                 # This package is being replaced anyway, so
6711                                                                 # ignore invalid dependencies so as not to
6712                                                                 # annoy the user too much (otherwise they'd be
6713                                                                 # forced to manually unmerge it first).
6714                                                                 continue
6715                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6716                                                         return False
6717                                                 blocker_atoms = [myatom for myatom in atoms \
6718                                                         if myatom.startswith("!")]
6719                                                 blocker_atoms.sort()
6720                                                 counter = long(pkg.metadata["COUNTER"])
6721                                                 blocker_cache[cpv] = \
6722                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6723                                         if blocker_atoms:
6724                                                 try:
6725                                                         for atom in blocker_atoms:
6726                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6727                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6728                                                                 self._blocker_parents.add(blocker, pkg)
6729                                                 except portage.exception.InvalidAtom, e:
6730                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6731                                                         show_invalid_depstring_notice(
6732                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6733                                                         return False
6734                                 for cpv in stale_cache:
6735                                         del blocker_cache[cpv]
6736                                 blocker_cache.flush()
6737                                 del blocker_cache
6738
6739                 # Discard any "uninstall" tasks scheduled by previous calls
6740                 # to this method, since those tasks may not make sense given
6741                 # the current graph state.
6742                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6743                 if previous_uninstall_tasks:
6744                         self._blocker_uninstalls = digraph()
6745                         self.digraph.difference_update(previous_uninstall_tasks)
6746
6747                 for blocker in self._blocker_parents.leaf_nodes():
6748                         self.spinner.update()
6749                         root_config = self.roots[blocker.root]
6750                         virtuals = root_config.settings.getvirtuals()
6751                         myroot = blocker.root
6752                         initial_db = self.trees[myroot]["vartree"].dbapi
6753                         final_db = self.mydbapi[myroot]
6754
6755                         provider_virtual = False
6756                         if blocker.cp in virtuals and \
6757                                 not self._have_new_virt(blocker.root, blocker.cp):
6758                                 provider_virtual = True
6759
6760                         # Use this to check PROVIDE for each matched package
6761                         # when necessary.
6762                         atom_set = InternalPackageSet(
6763                                 initial_atoms=[blocker.atom])
6764
6765                         if provider_virtual:
6766                                 atoms = []
6767                                 for provider_entry in virtuals[blocker.cp]:
6768                                         provider_cp = \
6769                                                 portage.dep_getkey(provider_entry)
6770                                         atoms.append(blocker.atom.replace(
6771                                                 blocker.cp, provider_cp))
6772                         else:
6773                                 atoms = [blocker.atom]
6774
6775                         blocked_initial = set()
6776                         for atom in atoms:
6777                                 for pkg in initial_db.match_pkgs(atom):
6778                                         if atom_set.findAtomForPackage(pkg):
6779                                                 blocked_initial.add(pkg)
6780
6781                         blocked_final = set()
6782                         for atom in atoms:
6783                                 for pkg in final_db.match_pkgs(atom):
6784                                         if atom_set.findAtomForPackage(pkg):
6785                                                 blocked_final.add(pkg)
6786
6787                         if not blocked_initial and not blocked_final:
6788                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6789                                 self._blocker_parents.remove(blocker)
6790                                 # Discard any parents that don't have any more blockers.
6791                                 for pkg in parent_pkgs:
6792                                         self._irrelevant_blockers.add(blocker, pkg)
6793                                         if not self._blocker_parents.child_nodes(pkg):
6794                                                 self._blocker_parents.remove(pkg)
6795                                 continue
6796                         for parent in self._blocker_parents.parent_nodes(blocker):
6797                                 unresolved_blocks = False
6798                                 depends_on_order = set()
6799                                 for pkg in blocked_initial:
6800                                         if pkg.slot_atom == parent.slot_atom:
6801                                                 # TODO: Support blocks within slots in cases where it
6802                                                 # might make sense.  For example, a new version might
6803                                                 # require that the old version be uninstalled at build
6804                                                 # time.
6805                                                 continue
6806                                         if parent.installed:
6807                                                 # Two currently installed packages conflict with
6808                                                 # each other. Ignore this case since the damage
6809                                                 # is already done and this would be likely to
6810                                                 # confuse users if displayed like a normal blocker.
6811                                                 continue
6812
6813                                         self._blocked_pkgs.add(pkg, blocker)
6814
6815                                         if parent.operation == "merge":
6816                                                 # Maybe the blocked package can be replaced or simply
6817                                                 # unmerged to resolve this block.
6818                                                 depends_on_order.add((pkg, parent))
6819                                                 continue
6820                                         # None of the above blocker resolution techniques apply,
6821                                         # so apparently this one is unresolvable.
6822                                         unresolved_blocks = True
6823                                 for pkg in blocked_final:
6824                                         if pkg.slot_atom == parent.slot_atom:
6825                                                 # TODO: Support blocks within slots.
6826                                                 continue
6827                                         if parent.operation == "nomerge" and \
6828                                                 pkg.operation == "nomerge":
6829                                                 # This blocker will be handled the next time that a
6830                                                 # merge of either package is triggered.
6831                                                 continue
6832
6833                                         self._blocked_pkgs.add(pkg, blocker)
6834
6835                                         # Maybe the blocking package can be
6836                                         # unmerged to resolve this block.
6837                                         if parent.operation == "merge" and pkg.installed:
6838                                                 depends_on_order.add((pkg, parent))
6839                                                 continue
6840                                         elif parent.operation == "nomerge":
6841                                                 depends_on_order.add((parent, pkg))
6842                                                 continue
6843                                         # None of the above blocker resolution techniques apply,
6844                                         # so apparently this one is unresolvable.
6845                                         unresolved_blocks = True
6846
6847                                 # Make sure we don't unmerge any packages that have been pulled
6848                                 # into the graph.
6849                                 if not unresolved_blocks and depends_on_order:
6850                                         for inst_pkg, inst_task in depends_on_order:
6851                                                 if self.digraph.contains(inst_pkg) and \
6852                                                         self.digraph.parent_nodes(inst_pkg):
6853                                                         unresolved_blocks = True
6854                                                         break
6855
6856                                 if not unresolved_blocks and depends_on_order:
6857                                         for inst_pkg, inst_task in depends_on_order:
6858                                                 uninst_task = Package(built=inst_pkg.built,
6859                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6860                                                         metadata=inst_pkg.metadata,
6861                                                         operation="uninstall",
6862                                                         root_config=inst_pkg.root_config,
6863                                                         type_name=inst_pkg.type_name)
6864                                                 self._pkg_cache[uninst_task] = uninst_task
6865                                                 # Enforce correct merge order with a hard dep.
6866                                                 self.digraph.addnode(uninst_task, inst_task,
6867                                                         priority=BlockerDepPriority.instance)
6868                                                 # Count references to this blocker so that it can be
6869                                                 # invalidated after nodes referencing it have been
6870                                                 # merged.
6871                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6872                                 if not unresolved_blocks and not depends_on_order:
6873                                         self._irrelevant_blockers.add(blocker, parent)
6874                                         self._blocker_parents.remove_edge(blocker, parent)
6875                                         if not self._blocker_parents.parent_nodes(blocker):
6876                                                 self._blocker_parents.remove(blocker)
6877                                         if not self._blocker_parents.child_nodes(parent):
6878                                                 self._blocker_parents.remove(parent)
6879                                 if unresolved_blocks:
6880                                         self._unsolvable_blockers.add(blocker, parent)
6881
6882                 return True
6883
6884         def _accept_blocker_conflicts(self):
6885                 acceptable = False
6886                 for x in ("--buildpkgonly", "--fetchonly",
6887                         "--fetch-all-uri", "--nodeps"):
6888                         if x in self.myopts:
6889                                 acceptable = True
6890                                 break
6891                 return acceptable
6892
6893         def _merge_order_bias(self, mygraph):
6894                 """
6895                 For optimal leaf node selection, promote deep system runtime deps and
6896                 order nodes from highest to lowest overall reference count.
6897                 """
6898
6899                 node_info = {}
6900                 for node in mygraph.order:
6901                         node_info[node] = len(mygraph.parent_nodes(node))
6902                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6903
6904                 def cmp_merge_preference(node1, node2):
6905
6906                         if node1.operation == 'uninstall':
6907                                 if node2.operation == 'uninstall':
6908                                         return 0
6909                                 return 1
6910
6911                         if node2.operation == 'uninstall':
6912                                 if node1.operation == 'uninstall':
6913                                         return 0
6914                                 return -1
6915
6916                         node1_sys = node1 in deep_system_deps
6917                         node2_sys = node2 in deep_system_deps
6918                         if node1_sys != node2_sys:
6919                                 if node1_sys:
6920                                         return -1
6921                                 return 1
6922
6923                         return node_info[node2] - node_info[node1]
6924
6925                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
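		# Hedged editorial note: cmp_sort_key() adapts an old-style comparison
		# function for key= based sorting, so the call above behaves like the
		# classic form
		#
		#     mygraph.order.sort(cmp=cmp_merge_preference)
		#
		# while staying compatible with Python versions that drop the cmp
		# argument.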
6926
6927         def altlist(self, reversed=False):
6928
6929                 while self._serialized_tasks_cache is None:
6930                         self._resolve_conflicts()
6931                         try:
6932                                 self._serialized_tasks_cache, self._scheduler_graph = \
6933                                         self._serialize_tasks()
6934                         except self._serialize_tasks_retry:
6935                                 pass
6936
6937                 retlist = self._serialized_tasks_cache[:]
6938                 if reversed:
6939                         retlist.reverse()
6940                 return retlist
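		# Hedged editorial sketch (variable names illustrative): callers take
		# the serialized merge list in forward order for merging and reversed
		# for unmerge ordering, e.g.
		#
		#     mergelist = depgraph.altlist()
		#     unmerge_order = depgraph.altlist(reversed=True)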
6941
6942         def schedulerGraph(self):
6943                 """
6944                 The scheduler graph is identical to the normal one except that
6945                 uninstall edges are reversed in specific cases that require
6946                 conflicting packages to be temporarily installed simultaneously.
6947                 This is intended for use by the Scheduler in its parallelization
6948                 logic. It ensures that temporary simultaneous installation of
6949                 conflicting packages is avoided when appropriate (especially for
6950                 !!atom blockers), but allowed in specific cases that require it.
6951
6952                 Note that this method calls break_refs() which alters the state of
6953                 internal Package instances such that this depgraph instance should
6954                 not be used to perform any more calculations.
6955                 """
6956                 if self._scheduler_graph is None:
6957                         self.altlist()
6958                 self.break_refs(self._scheduler_graph.order)
6959                 return self._scheduler_graph
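		# Hedged editorial sketch: because schedulerGraph() ends up calling
		# break_refs() on the graph's nodes, a caller should fetch it once and
		# stop using this depgraph for further calculations, e.g. (illustrative):
		#
		#     sched_graph = depgraph.schedulerGraph()
		#     del depgraph  # no further depgraph calculations are valid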
6960
6961         def break_refs(self, nodes):
6962                 """
6963                 Take a mergelist like that returned from self.altlist() and
6964                 break any references that lead back to the depgraph. This is
6965                 useful if you want to hold references to packages without
6966                 also holding the depgraph on the heap.
6967                 """
6968                 for node in nodes:
6969                         if hasattr(node, "root_config"):
6970                                 # The FakeVartree references the _package_cache which
6971                                 # references the depgraph. So that Package instances don't
6972                                 # hold the depgraph and FakeVartree on the heap, replace
6973                                 # the RootConfig that references the FakeVartree with the
6974                                 # original RootConfig instance which references the actual
6975                                 # vartree.
6976                                 node.root_config = \
6977                                         self._trees_orig[node.root_config.root]["root_config"]
6978
6979         def _resolve_conflicts(self):
6980                 if not self._complete_graph():
6981                         raise self._unknown_internal_error()
6982
6983                 if not self.validate_blockers():
6984                         raise self._unknown_internal_error()
6985
6986                 if self._slot_collision_info:
6987                         self._process_slot_conflicts()
6988
6989         def _serialize_tasks(self):
6990
6991                 if "--debug" in self.myopts:
6992                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6993                         self.digraph.debug_print()
6994                         writemsg("\n", noiselevel=-1)
6995
6996                 scheduler_graph = self.digraph.copy()
6997                 mygraph = self.digraph.copy()
6998                 # Prune "nomerge" root nodes if nothing depends on them, since
6999                 # otherwise they slow down merge order calculation. Don't remove
7000                 # non-root nodes since they help optimize merge order in some cases
7001                 # such as revdep-rebuild.
7002                 removed_nodes = set()
7003                 while True:
7004                         for node in mygraph.root_nodes():
7005                                 if not isinstance(node, Package) or \
7006                                         node.installed or node.onlydeps:
7007                                         removed_nodes.add(node)
7008                         if removed_nodes:
7009                                 self.spinner.update()
7010                                 mygraph.difference_update(removed_nodes)
7011                         if not removed_nodes:
7012                                 break
7013                         removed_nodes.clear()
7014                 self._merge_order_bias(mygraph)
7015                 def cmp_circular_bias(n1, n2):
7016                         """
7017                         RDEPEND is stronger than PDEPEND and this function
7018                         measures such a strength bias within a circular
7019                         dependency relationship.
7020                         """
7021                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7022                                 ignore_priority=priority_range.ignore_medium_soft)
7023                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7024                                 ignore_priority=priority_range.ignore_medium_soft)
7025                         if n1_n2_medium == n2_n1_medium:
7026                                 return 0
7027                         elif n1_n2_medium:
7028                                 return 1
7029                         return -1
7030                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7031                 retlist = []
7032                 # Contains uninstall tasks that have been scheduled to
7033                 # occur after overlapping blockers have been installed.
7034                 scheduled_uninstalls = set()
7035                 # Contains any Uninstall tasks that have been ignored
7036                 # in order to avoid the circular deps code path. These
7037                 # correspond to blocker conflicts that could not be
7038                 # resolved.
7039                 ignored_uninstall_tasks = set()
7040                 have_uninstall_task = False
7041                 complete = "complete" in self.myparams
7042                 asap_nodes = []
7043
7044                 def get_nodes(**kwargs):
7045                         """
7046                         Returns leaf nodes excluding Uninstall instances
7047                         since those should be executed as late as possible.
7048                         """
7049                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7050                                 if isinstance(node, Package) and \
7051                                         (node.operation != "uninstall" or \
7052                                         node in scheduled_uninstalls)]
7053
7054                 # sys-apps/portage needs special treatment if ROOT="/"
7055                 running_root = self._running_root.root
7056                 from portage.const import PORTAGE_PACKAGE_ATOM
7057                 runtime_deps = InternalPackageSet(
7058                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7059                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7060                         PORTAGE_PACKAGE_ATOM)
7061                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7062                         PORTAGE_PACKAGE_ATOM)
7063
7064                 if running_portage:
7065                         running_portage = running_portage[0]
7066                 else:
7067                         running_portage = None
7068
7069                 if replacement_portage:
7070                         replacement_portage = replacement_portage[0]
7071                 else:
7072                         replacement_portage = None
7073
7074                 if replacement_portage == running_portage:
7075                         replacement_portage = None
7076
7077                 if replacement_portage is not None:
7078                         # update from running_portage to replacement_portage asap
7079                         asap_nodes.append(replacement_portage)
7080
7081                 if running_portage is not None:
7082                         try:
7083                                 portage_rdepend = self._select_atoms_highest_available(
7084                                         running_root, running_portage.metadata["RDEPEND"],
7085                                         myuse=running_portage.use.enabled,
7086                                         parent=running_portage, strict=False)
7087                         except portage.exception.InvalidDependString, e:
7088                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7089                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7090                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7091                                 del e
7092                                 portage_rdepend = []
7093                         runtime_deps.update(atom for atom in portage_rdepend \
7094                                 if not atom.startswith("!"))
7095
7096                 def gather_deps(ignore_priority, mergeable_nodes,
7097                         selected_nodes, node):
7098                         """
7099                         Recursively gather a group of nodes that RDEPEND on
7100                         each other. This ensures that they are merged as a group
7101                         and get their RDEPENDs satisfied as soon as possible.
7102                         """
7103                         if node in selected_nodes:
7104                                 return True
7105                         if node not in mergeable_nodes:
7106                                 return False
7107                         if node == replacement_portage and \
7108                                 mygraph.child_nodes(node,
7109                                 ignore_priority=priority_range.ignore_medium_soft):
7110                                 # Make sure that portage always has all of its
7111                                 # RDEPENDs installed first.
7112                                 return False
7113                         selected_nodes.add(node)
7114                         for child in mygraph.child_nodes(node,
7115                                 ignore_priority=ignore_priority):
7116                                 if not gather_deps(ignore_priority,
7117                                         mergeable_nodes, selected_nodes, child):
7118                                         return False
7119                         return True
7120
7121                 def ignore_uninst_or_med(priority):
7122                         if priority is BlockerDepPriority.instance:
7123                                 return True
7124                         return priority_range.ignore_medium(priority)
7125
7126                 def ignore_uninst_or_med_soft(priority):
7127                         if priority is BlockerDepPriority.instance:
7128                                 return True
7129                         return priority_range.ignore_medium_soft(priority)
7130
7131                 tree_mode = "--tree" in self.myopts
7132                 # Tracks whether or not the current iteration should prefer asap_nodes
7133                 # if available.  This is set to False when the previous iteration
7134                 # failed to select any nodes.  It is reset whenever nodes are
7135                 # successfully selected.
7136                 prefer_asap = True
7137
7138                 # Controls whether or not the current iteration should drop edges that
7139                 # are "satisfied" by installed packages, in order to solve circular
7140                 # dependencies. The deep runtime dependencies of installed packages are
7141                 # not checked in this case (bug #199856), so it must be avoided
7142                 # whenever possible.
7143                 drop_satisfied = False
7144
7145                 # State of variables for successive iterations that loosen the
7146                 # criteria for node selection.
7147                 #
7148                 # iteration   prefer_asap   drop_satisfied
7149                 # 1           True          False
7150                 # 2           False         False
7151                 # 3           False         True
7152                 #
7153                 # If no nodes are selected on the last iteration, it is due to
7154                 # unresolved blockers or circular dependencies.
7155
7156                 while not mygraph.empty():
7157                         self.spinner.update()
7158                         selected_nodes = None
7159                         ignore_priority = None
7160                         if drop_satisfied or (prefer_asap and asap_nodes):
7161                                 priority_range = DepPrioritySatisfiedRange
7162                         else:
7163                                 priority_range = DepPriorityNormalRange
7164                         if prefer_asap and asap_nodes:
7165                                 # ASAP nodes are merged before their soft deps. Go ahead and
7166                                 # select root nodes here if necessary, since it's typical for
7167                                 # the parent to have been removed from the graph already.
7168                                 asap_nodes = [node for node in asap_nodes \
7169                                         if mygraph.contains(node)]
7170                                 for node in asap_nodes:
7171                                         if not mygraph.child_nodes(node,
7172                                                 ignore_priority=priority_range.ignore_soft):
7173                                                 selected_nodes = [node]
7174                                                 asap_nodes.remove(node)
7175                                                 break
7176                         if not selected_nodes and \
7177                                 not (prefer_asap and asap_nodes):
7178                                 for i in xrange(priority_range.NONE,
7179                                         priority_range.MEDIUM_SOFT + 1):
7180                                         ignore_priority = priority_range.ignore_priority[i]
7181                                         nodes = get_nodes(ignore_priority=ignore_priority)
7182                                         if nodes:
7183                                                 # If there is a mix of uninstall nodes with other
7184                                                 # types, save the uninstall nodes for later since
7185                                                 # sometimes a merge node will render an uninstall
7186                                                 # node unnecessary (due to occupying the same slot),
7187                                                 # and we want to avoid executing a separate uninstall
7188                                                 # task in that case.
7189                                                 if len(nodes) > 1:
7190                                                         good_uninstalls = []
7191                                                         with_some_uninstalls_excluded = []
7192                                                         for node in nodes:
7193                                                                 if node.operation == "uninstall":
7194                                                                         slot_node = self.mydbapi[node.root
7195                                                                                 ].match_pkgs(node.slot_atom)
7196                                                                         if slot_node and \
7197                                                                                 slot_node[0].operation == "merge":
7198                                                                                 continue
7199                                                                         good_uninstalls.append(node)
7200                                                                 with_some_uninstalls_excluded.append(node)
7201                                                         if good_uninstalls:
7202                                                                 nodes = good_uninstalls
7203                                                         elif with_some_uninstalls_excluded:
7204                                                                 nodes = with_some_uninstalls_excluded
7205                                                         else:
7206                                                                 nodes = nodes # no-op; keep the original node list
7207
7208                                                 if ignore_priority is None and not tree_mode:
7209                                                         # Greedily pop all of these nodes since no
7210                                                         # relationship has been ignored. This optimization
7211                                                         # destroys --tree output, so it's disabled in tree
7212                                                         # mode.
7213                                                         selected_nodes = nodes
7214                                                 else:
7215                                                         # For optimal merge order:
7216                                                         #  * Only pop one node.
7217                                                         #  * Removing a root node (node without a parent)
7218                                                         #    will not produce a leaf node, so avoid it.
7219                                                         #  * It's normal for a selected uninstall to be a
7220                                                         #    root node, so don't check them for parents.
7221                                                         for node in nodes:
7222                                                                 if node.operation == "uninstall" or \
7223                                                                         mygraph.parent_nodes(node):
7224                                                                         selected_nodes = [node]
7225                                                                         break
7226
7227                                                 if selected_nodes:
7228                                                         break
7229
7230                         if not selected_nodes:
7231                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7232                                 if nodes:
7233                                         mergeable_nodes = set(nodes)
7234                                         if prefer_asap and asap_nodes:
7235                                                 nodes = asap_nodes
7236                                         for i in xrange(priority_range.SOFT,
7237                                                 priority_range.MEDIUM_SOFT + 1):
7238                                                 ignore_priority = priority_range.ignore_priority[i]
7239                                                 for node in nodes:
7240                                                         if not mygraph.parent_nodes(node):
7241                                                                 continue
7242                                                         selected_nodes = set()
7243                                                         if gather_deps(ignore_priority,
7244                                                                 mergeable_nodes, selected_nodes, node):
7245                                                                 break
7246                                                         else:
7247                                                                 selected_nodes = None
7248                                                 if selected_nodes:
7249                                                         break
7250
7251                                         if prefer_asap and asap_nodes and not selected_nodes:
7252                                                 # We failed to find any asap nodes to merge, so ignore
7253                                                 # them for the next iteration.
7254                                                 prefer_asap = False
7255                                                 continue
7256
7257                         if selected_nodes and ignore_priority is not None:
7258                                 # Try to merge ignored medium_soft deps as soon as possible
7259                                 # if they're not satisfied by installed packages.
7260                                 for node in selected_nodes:
7261                                         children = set(mygraph.child_nodes(node))
7262                                         soft = children.difference(
7263                                                 mygraph.child_nodes(node,
7264                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7265                                         medium_soft = children.difference(
7266                                                 mygraph.child_nodes(node,
7267                                                         ignore_priority = \
7268                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7269                                         medium_soft.difference_update(soft)
7270                                         for child in medium_soft:
7271                                                 if child in selected_nodes:
7272                                                         continue
7273                                                 if child in asap_nodes:
7274                                                         continue
7275                                                 asap_nodes.append(child)
7276
7277                         if selected_nodes and len(selected_nodes) > 1:
7278                                 if not isinstance(selected_nodes, list):
7279                                         selected_nodes = list(selected_nodes)
7280                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7281
7282                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7283                                 # An Uninstall task needs to be executed in order to
7284                                 # avoid conflict if possible.
7285
7286                                 if drop_satisfied:
7287                                         priority_range = DepPrioritySatisfiedRange
7288                                 else:
7289                                         priority_range = DepPriorityNormalRange
7290
7291                                 mergeable_nodes = get_nodes(
7292                                         ignore_priority=ignore_uninst_or_med)
7293
7294                                 min_parent_deps = None
7295                                 uninst_task = None
7296                                 for task in myblocker_uninstalls.leaf_nodes():
7297                                         # Do some sanity checks so that system or world packages
7298                                         # don't get uninstalled inappropriately here (only really
7299                                         # necessary when --complete-graph has not been enabled).
7300
7301                                         if task in ignored_uninstall_tasks:
7302                                                 continue
7303
7304                                         if task in scheduled_uninstalls:
7305                                                 # It's been scheduled but it hasn't
7306                                                 # been executed yet due to dependence
7307                                                 # on installation of blocking packages.
7308                                                 continue
7309
7310                                         root_config = self.roots[task.root]
7311                                         inst_pkg = self._pkg_cache[
7312                                                 ("installed", task.root, task.cpv, "nomerge")]
7313
7314                                         if self.digraph.contains(inst_pkg):
7315                                                 continue
7316
7317                                         forbid_overlap = False
7318                                         heuristic_overlap = False
7319                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7320                                                 if blocker.eapi in ("0", "1"):
7321                                                         heuristic_overlap = True
7322                                                 elif blocker.atom.blocker.overlap.forbid:
7323                                                         forbid_overlap = True
7324                                                         break
7325                                         if forbid_overlap and running_root == task.root:
7326                                                 continue
7327
7328                                         if heuristic_overlap and running_root == task.root:
7329                                                 # Never uninstall sys-apps/portage or its essential
7330                                                 # dependencies, except through replacement.
7331                                                 try:
7332                                                         runtime_dep_atoms = \
7333                                                                 list(runtime_deps.iterAtomsForPackage(task))
7334                                                 except portage.exception.InvalidDependString, e:
7335                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7336                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7337                                                                 (task.root, task.cpv, e), noiselevel=-1)
7338                                                         del e
7339                                                         continue
7340
7341                                                 # Don't uninstall a runtime dep if it appears
7342                                                 # to be the only suitable one installed.
7343                                                 skip = False
7344                                                 vardb = root_config.trees["vartree"].dbapi
7345                                                 for atom in runtime_dep_atoms:
7346                                                         other_version = None
7347                                                         for pkg in vardb.match_pkgs(atom):
7348                                                                 if pkg.cpv == task.cpv and \
7349                                                                         pkg.metadata["COUNTER"] == \
7350                                                                         task.metadata["COUNTER"]:
7351                                                                         continue
7352                                                                 other_version = pkg
7353                                                                 break
7354                                                         if other_version is None:
7355                                                                 skip = True
7356                                                                 break
7357                                                 if skip:
7358                                                         continue
7359
7360                                                 # For packages in the system set, don't take
7361                                                 # any chances. If the conflict can't be resolved
7362                                                 # by a normal replacement operation then abort.
7363                                                 skip = False
7364                                                 try:
7365                                                         for atom in root_config.sets[
7366                                                                 "system"].iterAtomsForPackage(task):
7367                                                                 skip = True
7368                                                                 break
7369                                                 except portage.exception.InvalidDependString, e:
7370                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7371                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7372                                                                 (task.root, task.cpv, e), noiselevel=-1)
7373                                                         del e
7374                                                         skip = True
7375                                                 if skip:
7376                                                         continue
7377
7378                                         # Note that the world check isn't always
7379                                         # necessary since self._complete_graph() will
7380                                         # add all packages from the system and world sets to the
7381                                         # graph. This just allows unresolved conflicts to be
7382                                         # detected as early as possible, which makes it possible
7383                                         # to avoid calling self._complete_graph() when it is
7384                                         # unnecessary due to blockers triggering an abort.
7385                                         if not complete:
7386                                                 # For packages in the world set, go ahead and uninstall
7387                                                 # when necessary, as long as the atom will be satisfied
7388                                                 # in the final state.
7389                                                 graph_db = self.mydbapi[task.root]
7390                                                 skip = False
7391                                                 try:
7392                                                         for atom in root_config.sets[
7393                                                                 "world"].iterAtomsForPackage(task):
7394                                                                 satisfied = False
7395                                                                 for pkg in graph_db.match_pkgs(atom):
7396                                                                         if pkg == inst_pkg:
7397                                                                                 continue
7398                                                                         satisfied = True
7399                                                                         break
7400                                                                 if not satisfied:
7401                                                                         skip = True
7402                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7403                                                                         break
7404                                                 except portage.exception.InvalidDependString, e:
7405                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7406                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7407                                                                 (task.root, task.cpv, e), noiselevel=-1)
7408                                                         del e
7409                                                         skip = True
7410                                                 if skip:
7411                                                         continue
7412
7413                                         # Check the deps of parent nodes to ensure that
7414                                         # the chosen task produces a leaf node. Maybe
7415                                         # this can be optimized some more to make the
7416                                         # best possible choice, but the current algorithm
7417                                         # is simple and should be near optimal for most
7418                                         # common cases.
7419                                         mergeable_parent = False
7420                                         parent_deps = set()
7421                                         for parent in mygraph.parent_nodes(task):
7422                                                 parent_deps.update(mygraph.child_nodes(parent,
7423                                                         ignore_priority=priority_range.ignore_medium_soft))
7424                                                 if parent in mergeable_nodes and \
7425                                                         gather_deps(ignore_uninst_or_med_soft,
7426                                                         mergeable_nodes, set(), parent):
7427                                                         mergeable_parent = True
7428
7429                                         if not mergeable_parent:
7430                                                 continue
7431
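                                             # Among the candidate uninstalls, prefer the one whose
                                             # parents have the fewest other remaining dependencies;
                                             # that task is closest to producing a mergeable leaf node.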
7432                                         parent_deps.remove(task)
7433                                         if min_parent_deps is None or \
7434                                                 len(parent_deps) < min_parent_deps:
7435                                                 min_parent_deps = len(parent_deps)
7436                                                 uninst_task = task
7437
7438                                 if uninst_task is not None:
7439                                         # The uninstall is performed only after blocking
7440                                         # packages have been merged on top of it. File
7441                                         # collisions between blocking packages are detected
7442                                         # and removed from the list of files to be uninstalled.
7443                                         scheduled_uninstalls.add(uninst_task)
7444                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7445
7446                                         # Reverse the parent -> uninstall edges since we want
7447                                         # to do the uninstall after blocking packages have
7448                                         # been merged on top of it.
7449                                         mygraph.remove(uninst_task)
7450                                         for blocked_pkg in parent_nodes:
7451                                                 mygraph.add(blocked_pkg, uninst_task,
7452                                                         priority=BlockerDepPriority.instance)
7453                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7454                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7455                                                         priority=BlockerDepPriority.instance)
7456
7457                                         # Reset the state variables for leaf node selection and
7458                                         # continue trying to select leaf nodes.
7459                                         prefer_asap = True
7460                                         drop_satisfied = False
7461                                         continue
7462
7463                         if not selected_nodes:
7464                                 # Only select root nodes as a last resort. This case should
7465                                 # only trigger when the graph is nearly empty and the only
7466                                 # remaining nodes are isolated (no parents or children). Since
7467                                 # the nodes must be isolated, ignore_priority is not needed.
7468                                 selected_nodes = get_nodes()
7469
7470                         if not selected_nodes and not drop_satisfied:
7471                                 drop_satisfied = True
7472                                 continue
7473
7474                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7475                                 # If possible, drop an uninstall task here in order to avoid
7476                                 # the circular deps code path. The corresponding blocker will
7477                                 # still be counted as an unresolved conflict.
7478                                 uninst_task = None
7479                                 for node in myblocker_uninstalls.leaf_nodes():
7480                                         try:
7481                                                 mygraph.remove(node)
7482                                         except KeyError:
7483                                                 pass
7484                                         else:
7485                                                 uninst_task = node
7486                                                 ignored_uninstall_tasks.add(node)
7487                                                 break
7488
7489                                 if uninst_task is not None:
7490                                         # Reset the state variables for leaf node selection and
7491                                         # continue trying to select leaf nodes.
7492                                         prefer_asap = True
7493                                         drop_satisfied = False
7494                                         continue
7495
7496                         if not selected_nodes:
7497                                 self._circular_deps_for_display = mygraph
7498                                 raise self._unknown_internal_error()
7499
7500                         # At this point, we've succeeded in selecting one or more nodes, so
7501                         # reset state variables for leaf node selection.
7502                         prefer_asap = True
7503                         drop_satisfied = False
7504
7505                         mygraph.difference_update(selected_nodes)
7506
7507                         for node in selected_nodes:
7508                                 if isinstance(node, Package) and \
7509                                         node.operation == "nomerge":
7510                                         continue
7511
7512                                 # Handle interactions between blockers
7513                                 # and uninstallation tasks.
7514                                 solved_blockers = set()
7515                                 uninst_task = None
7516                                 if isinstance(node, Package) and \
7517                                         "uninstall" == node.operation:
7518                                         have_uninstall_task = True
7519                                         uninst_task = node
7520                                 else:
7521                                         vardb = self.trees[node.root]["vartree"].dbapi
7522                                         previous_cpv = vardb.match(node.slot_atom)
7523                                         if previous_cpv:
7524                                                 # The package will be replaced by this one, so remove
7525                                                 # the corresponding Uninstall task if necessary.
7526                                                 previous_cpv = previous_cpv[0]
7527                                                 uninst_task = \
7528                                                         ("installed", node.root, previous_cpv, "uninstall")
7529                                                 try:
7530                                                         mygraph.remove(uninst_task)
7531                                                 except KeyError:
7532                                                         pass
7533
7534                                 if uninst_task is not None and \
7535                                         uninst_task not in ignored_uninstall_tasks and \
7536                                         myblocker_uninstalls.contains(uninst_task):
7537                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7538                                         myblocker_uninstalls.remove(uninst_task)
7539                                         # Discard any blockers that this Uninstall solves.
7540                                         for blocker in blocker_nodes:
7541                                                 if not myblocker_uninstalls.child_nodes(blocker):
7542                                                         myblocker_uninstalls.remove(blocker)
7543                                                         solved_blockers.add(blocker)
7544
7545                                 retlist.append(node)
7546
7547                                 if (isinstance(node, Package) and \
7548                                         "uninstall" == node.operation) or \
7549                                         (uninst_task is not None and \
7550                                         uninst_task in scheduled_uninstalls):
7551                                         # Include satisfied blockers in the merge list
7552                                         # since the user might be interested and also
7553                                         # it serves as an indicator that blocking packages
7554                                         # will be temporarily installed simultaneously.
7555                                         for blocker in solved_blockers:
7556                                                 retlist.append(Blocker(atom=blocker.atom,
7557                                                         root=blocker.root, eapi=blocker.eapi,
7558                                                         satisfied=True))
7559
7560                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7561                 for node in myblocker_uninstalls.root_nodes():
7562                         unsolvable_blockers.add(node)
7563
7564                 for blocker in unsolvable_blockers:
7565                         retlist.append(blocker)
7566
7567                 # If any Uninstall tasks need to be executed in order
7568                 # to avoid a conflict, complete the graph with any
7569                 # dependencies that may have been initially
7570                 # neglected (to ensure that unsafe Uninstall tasks
7571                 # are properly identified and blocked from execution).
7572                 if have_uninstall_task and \
7573                         not complete and \
7574                         not unsolvable_blockers:
7575                         self.myparams.add("complete")
7576                         raise self._serialize_tasks_retry("")
7577
7578                 if unsolvable_blockers and \
7579                         not self._accept_blocker_conflicts():
7580                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7581                         self._serialized_tasks_cache = retlist[:]
7582                         self._scheduler_graph = scheduler_graph
7583                         raise self._unknown_internal_error()
7584
7585                 if self._slot_collision_info and \
7586                         not self._accept_blocker_conflicts():
7587                         self._serialized_tasks_cache = retlist[:]
7588                         self._scheduler_graph = scheduler_graph
7589                         raise self._unknown_internal_error()
7590
7591                 return retlist, scheduler_graph
7592
7593         def _show_circular_deps(self, mygraph):
7594                 # No leaf nodes are available, so we have a circular
7595                 # dependency panic situation.  Reduce the noise level to a
7596                 # minimum via repeated elimination of root nodes since they
7597                 # have no parents and thus cannot be part of a cycle.
7598                 while True:
7599                         root_nodes = mygraph.root_nodes(
7600                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7601                         if not root_nodes:
7602                                 break
7603                         mygraph.difference_update(root_nodes)
7604                 # Display the USE flags that are enabled on nodes that are part
7605                 # of dependency cycles in case that helps the user decide to
7606                 # disable some of them.
7607                 display_order = []
7608                 tempgraph = mygraph.copy()
7609                 while not tempgraph.empty():
7610                         nodes = tempgraph.leaf_nodes()
7611                         if not nodes:
7612                                 node = tempgraph.order[0]
7613                         else:
7614                                 node = nodes[0]
7615                         display_order.append(node)
7616                         tempgraph.remove(node)
7617                 display_order.reverse()
7618                 self.myopts.pop("--quiet", None)
7619                 self.myopts.pop("--verbose", None)
7620                 self.myopts["--tree"] = True
7621                 portage.writemsg("\n\n", noiselevel=-1)
7622                 self.display(display_order)
7623                 prefix = colorize("BAD", " * ")
7624                 portage.writemsg("\n", noiselevel=-1)
7625                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7626                         noiselevel=-1)
7627                 portage.writemsg("\n", noiselevel=-1)
7628                 mygraph.debug_print()
7629                 portage.writemsg("\n", noiselevel=-1)
7630                 portage.writemsg(prefix + "Note that circular dependencies " + \
7631                         "can often be avoided by temporarily\n", noiselevel=-1)
7632                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7633                         "optional dependencies.\n", noiselevel=-1)
7634
7635         def _show_merge_list(self):
7636                 if self._serialized_tasks_cache is not None and \
7637                         not (self._displayed_list and \
7638                         (self._displayed_list == self._serialized_tasks_cache or \
7639                         self._displayed_list == \
7640                                 list(reversed(self._serialized_tasks_cache)))):
7641                         display_list = self._serialized_tasks_cache[:]
7642                         if "--tree" in self.myopts:
7643                                 display_list.reverse()
7644                         self.display(display_list)
7645
7646         def _show_unsatisfied_blockers(self, blockers):
7647                 self._show_merge_list()
7648                 msg = "Error: The above package list contains " + \
7649                         "packages which cannot be installed " + \
7650                         "at the same time on the same system."
7651                 prefix = colorize("BAD", " * ")
7652                 from textwrap import wrap
7653                 portage.writemsg("\n", noiselevel=-1)
7654                 for line in wrap(msg, 70):
7655                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7656
7657                 # Display the conflicting packages along with the packages
7658                 # that pulled them in. This is helpful for troubleshooting
7659                 # cases in which blockers don't solve automatically and
7660                 # the reasons are not apparent from the normal merge list
7661                 # display.
7662
7663                 conflict_pkgs = {}
7664                 for blocker in blockers:
7665                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7666                                 self._blocker_parents.parent_nodes(blocker)):
7667                                 parent_atoms = self._parent_atoms.get(pkg)
7668                                 if not parent_atoms:
7669                                         atom = self._blocked_world_pkgs.get(pkg)
7670                                         if atom is not None:
7671                                                 parent_atoms = set([("@world", atom)])
7672                                 if parent_atoms:
7673                                         conflict_pkgs[pkg] = parent_atoms
7674
7675                 if conflict_pkgs:
7676                         # Reduce noise by pruning packages that are only
7677                         # pulled in by other conflict packages.
7678                         pruned_pkgs = set()
7679                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7680                                 relevant_parent = False
7681                                 for parent, atom in parent_atoms:
7682                                         if parent not in conflict_pkgs:
7683                                                 relevant_parent = True
7684                                                 break
7685                                 if not relevant_parent:
7686                                         pruned_pkgs.add(pkg)
7687                         for pkg in pruned_pkgs:
7688                                 del conflict_pkgs[pkg]
7689
7690                 if conflict_pkgs:
7691                         msg = []
7692                         msg.append("\n")
7693                         indent = "  "
7694                         # Max number of parents shown, to avoid flooding the display.
7695                         max_parents = 3
7696                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7697
7698                                 pruned_list = set()
7699
7700                                 # Prefer packages that are not directly involved in a conflict.
7701                                 for parent_atom in parent_atoms:
7702                                         if len(pruned_list) >= max_parents:
7703                                                 break
7704                                         parent, atom = parent_atom
7705                                         if parent not in conflict_pkgs:
7706                                                 pruned_list.add(parent_atom)
7707
7708                                 for parent_atom in parent_atoms:
7709                                         if len(pruned_list) >= max_parents:
7710                                                 break
7711                                         pruned_list.add(parent_atom)
7712
7713                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7714                                 msg.append(indent + "%s pulled in by\n" % pkg)
7715
7716                                 for parent_atom in pruned_list:
7717                                         parent, atom = parent_atom
7718                                         msg.append(2*indent)
7719                                         if isinstance(parent,
7720                                                 (PackageArg, AtomArg)):
7721                                                 # For PackageArg and AtomArg types, it's
7722                                                 # redundant to display the atom attribute.
7723                                                 msg.append(str(parent))
7724                                         else:
7725                                                 # Display the specific atom from SetArg or
7726                                                 # Package types.
7727                                                 msg.append("%s required by %s" % (atom, parent))
7728                                         msg.append("\n")
7729
7730                                 if omitted_parents:
7731                                         msg.append(2*indent)
7732                                         msg.append("(and %d more)\n" % omitted_parents)
7733
7734                                 msg.append("\n")
7735
7736                         sys.stderr.write("".join(msg))
7737                         sys.stderr.flush()
7738
7739                 if "--quiet" not in self.myopts:
7740                         show_blocker_docs_link()
7741
7742         def display(self, mylist, favorites=[], verbosity=None):
7743
7744                 # This is used to prevent display_problems() from
7745                 # redundantly displaying this exact same merge list
7746                 # again via _show_merge_list().
7747                 self._displayed_list = mylist
7748
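                     # verbosity levels: 1 = --quiet, 2 = default, 3 = --verbose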
7749                 if verbosity is None:
7750                         verbosity = ("--quiet" in self.myopts and 1 or \
7751                                 "--verbose" in self.myopts and 3 or 2)
7752                 favorites_set = InternalPackageSet(favorites)
7753                 oneshot = "--oneshot" in self.myopts or \
7754                         "--onlydeps" in self.myopts
7755                 columns = "--columns" in self.myopts
7756                 changelogs=[]
7757                 p=[]
7758                 blockers = []
7759
7760                 counters = PackageCounters()
7761
7762                 if verbosity == 1 and "--verbose" not in self.myopts:
7763                         def create_use_string(*args):
7764                                 return ""
7765                 else:
7766                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7767                                 old_iuse, old_use,
7768                                 is_new, reinst_flags,
7769                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7770                                 alphabetical=("--alphabetical" in self.myopts)):
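                                     # Decorations used in the generated USE string:
                                     #   *    the flag's state changed relative to the installed version
                                     #   %    the flag was added to (or, with a leading -, removed from) IUSE
                                     #   ()   the flag is forced/masked, or has been removed from IUSE
                                     #   -    the flag is disabled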
7771                                 enabled = []
7772                                 if alphabetical:
7773                                         disabled = enabled
7774                                         removed = enabled
7775                                 else:
7776                                         disabled = []
7777                                         removed = []
7778                                 cur_iuse = set(cur_iuse)
7779                                 enabled_flags = cur_iuse.intersection(cur_use)
7780                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7781                                 any_iuse = cur_iuse.union(old_iuse)
7782                                 any_iuse = list(any_iuse)
7783                                 any_iuse.sort()
7784                                 for flag in any_iuse:
7785                                         flag_str = None
7786                                         isEnabled = False
7787                                         reinst_flag = reinst_flags and flag in reinst_flags
7788                                         if flag in enabled_flags:
7789                                                 isEnabled = True
7790                                                 if is_new or flag in old_use and \
7791                                                         (all_flags or reinst_flag):
7792                                                         flag_str = red(flag)
7793                                                 elif flag not in old_iuse:
7794                                                         flag_str = yellow(flag) + "%*"
7795                                                 elif flag not in old_use:
7796                                                         flag_str = green(flag) + "*"
7797                                         elif flag in removed_iuse:
7798                                                 if all_flags or reinst_flag:
7799                                                         flag_str = yellow("-" + flag) + "%"
7800                                                         if flag in old_use:
7801                                                                 flag_str += "*"
7802                                                         flag_str = "(" + flag_str + ")"
7803                                                         removed.append(flag_str)
7804                                                 continue
7805                                         else:
7806                                                 if is_new or flag in old_iuse and \
7807                                                         flag not in old_use and \
7808                                                         (all_flags or reinst_flag):
7809                                                         flag_str = blue("-" + flag)
7810                                                 elif flag not in old_iuse:
7811                                                         flag_str = yellow("-" + flag)
7812                                                         if flag not in iuse_forced:
7813                                                                 flag_str += "%"
7814                                                 elif flag in old_use:
7815                                                         flag_str = green("-" + flag) + "*"
7816                                         if flag_str:
7817                                                 if flag in iuse_forced:
7818                                                         flag_str = "(" + flag_str + ")"
7819                                                 if isEnabled:
7820                                                         enabled.append(flag_str)
7821                                                 else:
7822                                                         disabled.append(flag_str)
7823
7824                                 if alphabetical:
7825                                         ret = " ".join(enabled)
7826                                 else:
7827                                         ret = " ".join(enabled + disabled + removed)
7828                                 if ret:
7829                                         ret = '%s="%s" ' % (name, ret)
7830                                 return ret
7831
7832                 repo_display = RepoDisplay(self.roots)
7833
7834                 tree_nodes = []
7835                 display_list = []
7836                 mygraph = self.digraph.copy()
7837
7838                 # If there are any Uninstall instances, add the corresponding
7839                 # blockers to the digraph (useful for --tree display).
7840
7841                 executed_uninstalls = set(node for node in mylist \
7842                         if isinstance(node, Package) and node.operation == "unmerge")
7843
7844                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7845                         uninstall_parents = \
7846                                 self._blocker_uninstalls.parent_nodes(uninstall)
7847                         if not uninstall_parents:
7848                                 continue
7849
7850                         # Remove the corresponding "nomerge" node and substitute
7851                         # the Uninstall node.
7852                         inst_pkg = self._pkg_cache[
7853                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7854                         try:
7855                                 mygraph.remove(inst_pkg)
7856                         except KeyError:
7857                                 pass
7858
7859                         try:
7860                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7861                         except KeyError:
7862                                 inst_pkg_blockers = []
7863
7864                         # Break the Package -> Uninstall edges.
7865                         mygraph.remove(uninstall)
7866
7867                         # Resolution of a package's blockers
7868                         # depends on its own uninstallation.
7869                         for blocker in inst_pkg_blockers:
7870                                 mygraph.add(uninstall, blocker)
7871
7872                         # Expand Package -> Uninstall edges into
7873                         # Package -> Blocker -> Uninstall edges.
7874                         for blocker in uninstall_parents:
7875                                 mygraph.add(uninstall, blocker)
7876                                 for parent in self._blocker_parents.parent_nodes(blocker):
7877                                         if parent != inst_pkg:
7878                                                 mygraph.add(blocker, parent)
7879
7880                         # If the uninstall task did not need to be executed because
7881                         # of an upgrade, display Blocker -> Upgrade edges since the
7882                         # corresponding Blocker -> Uninstall edges will not be shown.
7883                         upgrade_node = \
7884                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7885                         if upgrade_node is not None and \
7886                                 uninstall not in executed_uninstalls:
7887                                 for blocker in uninstall_parents:
7888                                         mygraph.add(upgrade_node, blocker)
7889
7890                 unsatisfied_blockers = []
7891                 i = 0
7892                 depth = 0
7893                 shown_edges = set()
7894                 for x in mylist:
7895                         if isinstance(x, Blocker) and not x.satisfied:
7896                                 unsatisfied_blockers.append(x)
7897                                 continue
7898                         graph_key = x
7899                         if "--tree" in self.myopts:
7900                                 depth = len(tree_nodes)
7901                                 while depth and graph_key not in \
7902                                         mygraph.child_nodes(tree_nodes[depth-1]):
7903                                                 depth -= 1
7904                                 if depth:
7905                                         tree_nodes = tree_nodes[:depth]
7906                                         tree_nodes.append(graph_key)
7907                                         display_list.append((x, depth, True))
7908                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7909                                 else:
7910                                         traversed_nodes = set() # prevent endless cycles
7911                                         traversed_nodes.add(graph_key)
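                                             # Recursively walk up the graph from this node, recording
                                             # one chain of parents so the --tree output can show what
                                             # pulled the package in; shown_edges prevents drawing the
                                             # same edge more than once.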
7912                                         def add_parents(current_node, ordered):
7913                                                 parent_nodes = None
7914                                                 # Do not traverse to parents if this node is
7915                                                 # an argument or a direct member of a set that has
7916                                                 # been specified as an argument (system or world).
7917                                                 if current_node not in self._set_nodes:
7918                                                         parent_nodes = mygraph.parent_nodes(current_node)
7919                                                 if parent_nodes:
7920                                                         child_nodes = set(mygraph.child_nodes(current_node))
7921                                                         selected_parent = None
7922                                                         # First, try to avoid a direct cycle.
7923                                                         for node in parent_nodes:
7924                                                                 if not isinstance(node, (Blocker, Package)):
7925                                                                         continue
7926                                                                 if node not in traversed_nodes and \
7927                                                                         node not in child_nodes:
7928                                                                         edge = (current_node, node)
7929                                                                         if edge in shown_edges:
7930                                                                                 continue
7931                                                                         selected_parent = node
7932                                                                         break
7933                                                         if not selected_parent:
7934                                                                 # A direct cycle is unavoidable.
7935                                                                 for node in parent_nodes:
7936                                                                         if not isinstance(node, (Blocker, Package)):
7937                                                                                 continue
7938                                                                         if node not in traversed_nodes:
7939                                                                                 edge = (current_node, node)
7940                                                                                 if edge in shown_edges:
7941                                                                                         continue
7942                                                                                 selected_parent = node
7943                                                                                 break
7944                                                         if selected_parent:
7945                                                                 shown_edges.add((current_node, selected_parent))
7946                                                                 traversed_nodes.add(selected_parent)
7947                                                                 add_parents(selected_parent, False)
7948                                                 display_list.append((current_node,
7949                                                         len(tree_nodes), ordered))
7950                                                 tree_nodes.append(current_node)
7951                                         tree_nodes = []
7952                                         add_parents(graph_key, True)
7953                         else:
7954                                 display_list.append((x, depth, True))
7955                 mylist = display_list
7956                 for x in unsatisfied_blockers:
7957                         mylist.append((x, 0, True))
7958
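                     # Walk the display list backwards and prune "nomerge" filler entries
                     # that were only added while building the tree: consecutive duplicates
                     # and entries that are not needed to show the path to a real merge.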
7959                 last_merge_depth = 0
7960                 for i in xrange(len(mylist)-1,-1,-1):
7961                         graph_key, depth, ordered = mylist[i]
7962                         if not ordered and depth == 0 and i > 0 \
7963                                 and graph_key == mylist[i-1][0] and \
7964                                 mylist[i-1][1] == 0:
7965                                 # An ordered node got a consecutive duplicate when the tree was
7966                                 # being filled in.
7967                                 del mylist[i]
7968                                 continue
7969                         if ordered and graph_key[-1] != "nomerge":
7970                                 last_merge_depth = depth
7971                                 continue
7972                         if depth >= last_merge_depth or \
7973                                 i < len(mylist) - 1 and \
7974                                 depth >= mylist[i+1][1]:
7975                                         del mylist[i]
7976
7977                 from portage import flatten
7978                 from portage.dep import use_reduce, paren_reduce
7979                 # List of files to fetch - avoids counting the same file twice
7980                 # in the size display (verbose mode)
7981                 myfetchlist=[]
7982
7983                 # Use this set to detect when all the "repoadd" strings are "[0]"
7984                 # and disable the entire repo display in this case.
7985                 repoadd_set = set()
7986
7987                 for mylist_index in xrange(len(mylist)):
7988                         x, depth, ordered = mylist[mylist_index]
7989                         pkg_type = x[0]
7990                         myroot = x[1]
7991                         pkg_key = x[2]
7992                         portdb = self.trees[myroot]["porttree"].dbapi
7993                         bindb  = self.trees[myroot]["bintree"].dbapi
7994                         vardb = self.trees[myroot]["vartree"].dbapi
7995                         vartree = self.trees[myroot]["vartree"]
7996                         pkgsettings = self.pkgsettings[myroot]
7997
7998                         fetch=" "
7999                         indent = " " * depth
8000
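                             # Blocker entries are flagged with "B" (unresolved) or "b"
                             # (satisfied, i.e. resolved by an uninstall that is scheduled).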
8001                         if isinstance(x, Blocker):
8002                                 if x.satisfied:
8003                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8004                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8005                                 else:
8006                                         blocker_style = "PKG_BLOCKER"
8007                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8008                                 if ordered:
8009                                         counters.blocks += 1
8010                                         if x.satisfied:
8011                                                 counters.blocks_satisfied += 1
8012                                 resolved = portage.key_expand(
8013                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8014                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8015                                         addl += " " + colorize(blocker_style, resolved)
8016                                 else:
8017                                         addl = "[%s %s] %s%s" % \
8018                                                 (colorize(blocker_style, "blocks"),
8019                                                 addl, indent, colorize(blocker_style, resolved))
8020                                 block_parents = self._blocker_parents.parent_nodes(x)
8021                                 block_parents = set([pnode[2] for pnode in block_parents])
8022                                 block_parents = ", ".join(block_parents)
8023                                 if resolved!=x[2]:
8024                                         addl += colorize(blocker_style,
8025                                                 " (\"%s\" is blocking %s)") % \
8026                                                 (str(x.atom).lstrip("!"), block_parents)
8027                                 else:
8028                                         addl += colorize(blocker_style,
8029                                                 " (is blocking %s)") % block_parents
8030                                 if isinstance(x, Blocker) and x.satisfied:
8031                                         if columns:
8032                                                 continue
8033                                         p.append(addl)
8034                                 else:
8035                                         blockers.append(addl)
8036                         else:
8037                                 pkg_status = x[3]
8038                                 pkg_merge = ordered and pkg_status == "merge"
8039                                 if not pkg_merge and pkg_status == "merge":
8040                                         pkg_status = "nomerge"
8041                                 built = pkg_type != "ebuild"
8042                                 installed = pkg_type == "installed"
8043                                 pkg = x
8044                                 metadata = pkg.metadata
8045                                 ebuild_path = None
8046                                 repo_name = metadata["repository"]
8047                                 if pkg_type == "ebuild":
8048                                         ebuild_path = portdb.findname(pkg_key)
8049                                         if not ebuild_path: # shouldn't happen
8050                                                 raise portage.exception.PackageNotFound(pkg_key)
8051                                         repo_path_real = os.path.dirname(os.path.dirname(
8052                                                 os.path.dirname(ebuild_path)))
8053                                 else:
8054                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8055                                 pkg_use = list(pkg.use.enabled)
8056                                 try:
8057                                         restrict = flatten(use_reduce(paren_reduce(
8058                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8059                                 except portage.exception.InvalidDependString, e:
8060                                         if not pkg.installed:
8061                                                 show_invalid_depstring_notice(x,
8062                                                         pkg.metadata["RESTRICT"], str(e))
8063                                                 del e
8064                                                 return 1
8065                                         restrict = []
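                                     # Fetch-restricted ebuilds are flagged with "F" (distfiles must be
                                     # fetched manually) or "f" (fetch-restricted, but the distfiles are
                                     # already available).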
8066                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8067                                         "fetch" in restrict:
8068                                         fetch = red("F")
8069                                         if ordered:
8070                                                 counters.restrict_fetch += 1
8071                                         if portdb.fetch_check(pkg_key, pkg_use):
8072                                                 fetch = green("f")
8073                                                 if ordered:
8074                                                         counters.restrict_fetch_satisfied += 1
8075
8076                                 # We need to test for "--emptytree" here rather than the "empty" param, because the "empty"
8077                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8078                                 myoldbest = []
8079                                 myinslotlist = None
8080                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
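                                     # Merge-status letters assigned below: "R" reinstall of the same
                                     # version, "U" upgrade within the slot, "UD" downgrade within the
                                     # slot, "NS" first package in a new slot, "N" completely new install.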
8081                                 if vardb.cpv_exists(pkg_key):
8082                                         addl="  "+yellow("R")+fetch+"  "
8083                                         if ordered:
8084                                                 if pkg_merge:
8085                                                         counters.reinst += 1
8086                                                 elif pkg_status == "uninstall":
8087                                                         counters.uninst += 1
8088                                 # filter out old-style virtual matches
8089                                 elif installed_versions and \
8090                                         portage.cpv_getkey(installed_versions[0]) == \
8091                                         portage.cpv_getkey(pkg_key):
8092                                         myinslotlist = vardb.match(pkg.slot_atom)
8093                                         # If this is the first install of a new-style virtual, we
8094                                         # need to filter out old-style virtual matches.
8095                                         if myinslotlist and \
8096                                                 portage.cpv_getkey(myinslotlist[0]) != \
8097                                                 portage.cpv_getkey(pkg_key):
8098                                                 myinslotlist = None
8099                                         if myinslotlist:
8100                                                 myoldbest = myinslotlist[:]
8101                                                 addl = "   " + fetch
8102                                                 if not portage.dep.cpvequal(pkg_key,
8103                                                         portage.best([pkg_key] + myoldbest)):
8104                                                         # Downgrade in slot
8105                                                         addl += turquoise("U")+blue("D")
8106                                                         if ordered:
8107                                                                 counters.downgrades += 1
8108                                                 else:
8109                                                         # Update in slot
8110                                                         addl += turquoise("U") + " "
8111                                                         if ordered:
8112                                                                 counters.upgrades += 1
8113                                         else:
8114                                                 # New slot, mark it new.
8115                                                 addl = " " + green("NS") + fetch + "  "
8116                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8117                                                 if ordered:
8118                                                         counters.newslot += 1
8119
8120                                         if "--changelog" in self.myopts:
8121                                                 inst_matches = vardb.match(pkg.slot_atom)
8122                                                 if inst_matches:
8123                                                         changelogs.extend(self.calc_changelog(
8124                                                                 portdb.findname(pkg_key),
8125                                                                 inst_matches[0], pkg_key))
8126                                 else:
8127                                         addl = " " + green("N") + " " + fetch + "  "
8128                                         if ordered:
8129                                                 counters.new += 1
8130
8131                                 verboseadd = ""
8132                                 repoadd = None
8133
8134                                 if True:
8135                                         # USE flag display
8136                                         forced_flags = set()
8137                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8138                                         forced_flags.update(pkgsettings.useforce)
8139                                         forced_flags.update(pkgsettings.usemask)
8140
8141                                         cur_use = [flag for flag in pkg.use.enabled \
8142                                                 if flag in pkg.iuse.all]
8143                                         cur_iuse = sorted(pkg.iuse.all)
8144
8145                                         if myoldbest and myinslotlist:
8146                                                 previous_cpv = myoldbest[0]
8147                                         else:
8148                                                 previous_cpv = pkg.cpv
8149                                         if vardb.cpv_exists(previous_cpv):
8150                                                 old_iuse, old_use = vardb.aux_get(
8151                                                                 previous_cpv, ["IUSE", "USE"])
8152                                                 old_iuse = list(set(
8153                                                         filter_iuse_defaults(old_iuse.split())))
8154                                                 old_iuse.sort()
8155                                                 old_use = old_use.split()
8156                                                 is_new = False
8157                                         else:
8158                                                 old_iuse = []
8159                                                 old_use = []
8160                                                 is_new = True
8161
8162                                         old_use = [flag for flag in old_use if flag in old_iuse]
8163
8164                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8165                                         use_expand.sort()
8166                                         use_expand.reverse()
8167                                         use_expand_hidden = \
8168                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8169
8170                                         def map_to_use_expand(myvals, forcedFlags=False,
8171                                                 removeHidden=True):
8172                                                 ret = {}
8173                                                 forced = {}
8174                                                 for exp in use_expand:
8175                                                         ret[exp] = []
8176                                                         forced[exp] = set()
8177                                                         for val in myvals[:]:
8178                                                                 if val.startswith(exp.lower()+"_"):
8179                                                                         if val in forced_flags:
8180                                                                                 forced[exp].add(val[len(exp)+1:])
8181                                                                         ret[exp].append(val[len(exp)+1:])
8182                                                                         myvals.remove(val)
8183                                                 ret["USE"] = myvals
8184                                                 forced["USE"] = [val for val in myvals \
8185                                                         if val in forced_flags]
8186                                                 if removeHidden:
8187                                                         for exp in use_expand_hidden:
8188                                                                 ret.pop(exp, None)
8189                                                 if forcedFlags:
8190                                                         return ret, forced
8191                                                 return ret
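                                        # Illustrative note (not part of the original code): with
                                        # use_expand = ["video_cards"] and myvals = ["gtk", "video_cards_radeon"],
                                        # map_to_use_expand(myvals) is expected to return roughly
                                        #   {"video_cards": ["radeon"], "USE": ["gtk"]}
                                        # (hidden groups are dropped unless removeHidden=False, and the
                                        # forced-flag dict is also returned when forcedFlags=True).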
8192
8193                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8194                                         # are the only thing that triggered reinstallation.
8195                                         reinst_flags_map = {}
8196                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8197                                         reinst_expand_map = None
8198                                         if reinstall_for_flags:
8199                                                 reinst_flags_map = map_to_use_expand(
8200                                                         list(reinstall_for_flags), removeHidden=False)
8201                                                 for k in list(reinst_flags_map):
8202                                                         if not reinst_flags_map[k]:
8203                                                                 del reinst_flags_map[k]
8204                                                 if not reinst_flags_map.get("USE"):
8205                                                         reinst_expand_map = reinst_flags_map.copy()
8206                                                         reinst_expand_map.pop("USE", None)
8207                                         if reinst_expand_map and \
8208                                                 not set(reinst_expand_map).difference(
8209                                                 use_expand_hidden):
8210                                                 use_expand_hidden = \
8211                                                         set(use_expand_hidden).difference(
8212                                                         reinst_expand_map)
8213
8214                                         cur_iuse_map, iuse_forced = \
8215                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8216                                         cur_use_map = map_to_use_expand(cur_use)
8217                                         old_iuse_map = map_to_use_expand(old_iuse)
8218                                         old_use_map = map_to_use_expand(old_use)
8219
8220                                         use_expand.sort()
8221                                         use_expand.insert(0, "USE")
8222                                         
8223                                         for key in use_expand:
8224                                                 if key in use_expand_hidden:
8225                                                         continue
8226                                                 verboseadd += create_use_string(key.upper(),
8227                                                         cur_iuse_map[key], iuse_forced[key],
8228                                                         cur_use_map[key], old_iuse_map[key],
8229                                                         old_use_map[key], is_new,
8230                                                         reinst_flags_map.get(key))
8231
8232                                 if verbosity == 3:
8233                                         # size verbose
8234                                         mysize=0
8235                                         if pkg_type == "ebuild" and pkg_merge:
8236                                                 try:
8237                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8238                                                                 useflags=pkg_use, debug=self.edebug)
8239                                                 except portage.exception.InvalidDependString, e:
8240                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8241                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8242                                                         del e
8243                                                         return 1
8244                                                 if myfilesdict is None:
8245                                                         myfilesdict="[empty/missing/bad digest]"
8246                                                 else:
8247                                                         for myfetchfile in myfilesdict:
8248                                                                 if myfetchfile not in myfetchlist:
8249                                                                         mysize+=myfilesdict[myfetchfile]
8250                                                                         myfetchlist.append(myfetchfile)
8251                                                         if ordered:
8252                                                                 counters.totalsize += mysize
8253                                                 verboseadd += format_size(mysize)
8254
8255                                         # overlay verbose
8256                                         # assign index for a previous version in the same slot
8257                                         has_previous = False
8258                                         repo_name_prev = None
8259                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8260                                                 metadata["SLOT"])
8261                                         slot_matches = vardb.match(slot_atom)
8262                                         if slot_matches:
8263                                                 has_previous = True
8264                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8265                                                         ["repository"])[0]
8266
8267                                         # now use the data to generate output
8268                                         if pkg.installed or not has_previous:
8269                                                 repoadd = repo_display.repoStr(repo_path_real)
8270                                         else:
8271                                                 repo_path_prev = None
8272                                                 if repo_name_prev:
8273                                                         repo_path_prev = portdb.getRepositoryPath(
8274                                                                 repo_name_prev)
8275                                                 if repo_path_prev == repo_path_real:
8276                                                         repoadd = repo_display.repoStr(repo_path_real)
8277                                                 else:
8278                                                         repoadd = "%s=>%s" % (
8279                                                                 repo_display.repoStr(repo_path_prev),
8280                                                                 repo_display.repoStr(repo_path_real))
8281                                         if repoadd:
8282                                                 repoadd_set.add(repoadd)
8283
8284                                 xs = [portage.cpv_getkey(pkg_key)] + \
8285                                         list(portage.catpkgsplit(pkg_key)[2:])
8286                                 if xs[2] == "r0":
8287                                         xs[2] = ""
8288                                 else:
8289                                         xs[2] = "-" + xs[2]
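                                # Illustrative note (not part of the original code): for a pkg_key
                                # such as "sys-apps/portage-2.1.6-r1", xs ends up as
                                # ["sys-apps/portage", "2.1.6", "-r1"]; an implicit -r0 revision is
                                # reduced to an empty string here.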
8290
8291                                 mywidth = 130
8292                                 if "COLUMNWIDTH" in self.settings:
8293                                         try:
8294                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8295                                         except ValueError, e:
8296                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8297                                                 portage.writemsg(
8298                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8299                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8300                                                 del e
8301                                 oldlp = mywidth - 30
8302                                 newlp = oldlp - 30
8303
8304                                 # Convert myoldbest from a list to a string.
8305                                 if not myoldbest:
8306                                         myoldbest = ""
8307                                 else:
8308                                         for pos, key in enumerate(myoldbest):
8309                                                 key = portage.catpkgsplit(key)[2] + \
8310                                                         "-" + portage.catpkgsplit(key)[3]
8311                                                 if key[-3:] == "-r0":
8312                                                         key = key[:-3]
8313                                                 myoldbest[pos] = key
8314                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
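                                # Illustrative note (not part of the original code): a non-empty
                                # myoldbest such as ["app-misc/foo-1.0", "app-misc/foo-2.0-r1"] is
                                # rendered above as the colorized string "[1.0, 2.0-r1]".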
8315
8316                                 pkg_cp = xs[0]
8317                                 root_config = self.roots[myroot]
8318                                 system_set = root_config.sets["system"]
8319                                 world_set  = root_config.sets["world"]
8320
8321                                 pkg_system = False
8322                                 pkg_world = False
8323                                 try:
8324                                         pkg_system = system_set.findAtomForPackage(pkg)
8325                                         pkg_world  = world_set.findAtomForPackage(pkg)
8326                                         if not (oneshot or pkg_world) and \
8327                                                 myroot == self.target_root and \
8328                                                 favorites_set.findAtomForPackage(pkg):
8329                                                 # Maybe it will be added to world now.
8330                                                 if create_world_atom(pkg, favorites_set, root_config):
8331                                                         pkg_world = True
8332                                 except portage.exception.InvalidDependString:
8333                                         # This is reported elsewhere if relevant.
8334                                         pass
8335
8336                                 def pkgprint(pkg_str):
8337                                         if pkg_merge:
8338                                                 if pkg_system:
8339                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8340                                                 elif pkg_world:
8341                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8342                                                 else:
8343                                                         return colorize("PKG_MERGE", pkg_str)
8344                                         elif pkg_status == "uninstall":
8345                                                 return colorize("PKG_UNINSTALL", pkg_str)
8346                                         else:
8347                                                 if pkg_system:
8348                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8349                                                 elif pkg_world:
8350                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8351                                                 else:
8352                                                         return colorize("PKG_NOMERGE", pkg_str)
8353
8354                                 try:
8355                                         properties = flatten(use_reduce(paren_reduce(
8356                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8357                                 except portage.exception.InvalidDependString, e:
8358                                         if not pkg.installed:
8359                                                 show_invalid_depstring_notice(pkg,
8360                                                         pkg.metadata["PROPERTIES"], str(e))
8361                                                 del e
8362                                                 return 1
8363                                         properties = []
8364                                 interactive = "interactive" in properties
8365                                 if interactive and pkg.operation == "merge":
8366                                         addl = colorize("WARN", "I") + addl[1:]
8367                                         if ordered:
8368                                                 counters.interactive += 1
8369
8370                                 if x[1]!="/":
8371                                         if myoldbest:
8372                                                 myoldbest +=" "
8373                                         if "--columns" in self.myopts:
8374                                                 if "--quiet" in self.myopts:
8375                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8376                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8377                                                         myprint=myprint+myoldbest
8378                                                         myprint=myprint+darkgreen("to "+x[1])
8379                                                         verboseadd = None
8380                                                 else:
8381                                                         if not pkg_merge:
8382                                                                 myprint = "[%s] %s%s" % \
8383                                                                         (pkgprint(pkg_status.ljust(13)),
8384                                                                         indent, pkgprint(pkg.cp))
8385                                                         else:
8386                                                                 myprint = "[%s %s] %s%s" % \
8387                                                                         (pkgprint(pkg.type_name), addl,
8388                                                                         indent, pkgprint(pkg.cp))
8389                                                         if (newlp-nc_len(myprint)) > 0:
8390                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8391                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8392                                                         if (oldlp-nc_len(myprint)) > 0:
8393                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8394                                                         myprint=myprint+myoldbest
8395                                                         myprint += darkgreen("to " + pkg.root)
8396                                         else:
8397                                                 if not pkg_merge:
8398                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8399                                                 else:
8400                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8401                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8402                                                         myoldbest + darkgreen("to " + myroot)
8403                                 else:
8404                                         if "--columns" in self.myopts:
8405                                                 if "--quiet" in self.myopts:
8406                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8407                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8408                                                         myprint=myprint+myoldbest
8409                                                         verboseadd = None
8410                                                 else:
8411                                                         if not pkg_merge:
8412                                                                 myprint = "[%s] %s%s" % \
8413                                                                         (pkgprint(pkg_status.ljust(13)),
8414                                                                         indent, pkgprint(pkg.cp))
8415                                                         else:
8416                                                                 myprint = "[%s %s] %s%s" % \
8417                                                                         (pkgprint(pkg.type_name), addl,
8418                                                                         indent, pkgprint(pkg.cp))
8419                                                         if (newlp-nc_len(myprint)) > 0:
8420                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8421                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8422                                                         if (oldlp-nc_len(myprint)) > 0:
8423                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8424                                                         myprint += myoldbest
8425                                         else:
8426                                                 if not pkg_merge:
8427                                                         myprint = "[%s] %s%s %s" % \
8428                                                                 (pkgprint(pkg_status.ljust(13)),
8429                                                                 indent, pkgprint(pkg.cpv),
8430                                                                 myoldbest)
8431                                                 else:
8432                                                         myprint = "[%s %s] %s%s %s" % \
8433                                                                 (pkgprint(pkg_type), addl, indent,
8434                                                                 pkgprint(pkg.cpv), myoldbest)
8435
8436                                 if columns and pkg.operation == "uninstall":
8437                                         continue
8438                                 p.append((myprint, verboseadd, repoadd))
8439
8440                                 if "--tree" not in self.myopts and \
8441                                         "--quiet" not in self.myopts and \
8442                                         not self._opts_no_restart.intersection(self.myopts) and \
8443                                         pkg.root == self._running_root.root and \
8444                                         portage.match_from_list(
8445                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8446                                         not vardb.cpv_exists(pkg.cpv) and \
8447                                         "--quiet" not in self.myopts:
8448                                                 if mylist_index < len(mylist) - 1:
8449                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8450                                                         p.append(colorize("WARN", "    then resume the merge."))
8451
8452                 out = sys.stdout
8453                 show_repos = repoadd_set and repoadd_set != set(["0"])
8454
8455                 for x in p:
8456                         if isinstance(x, basestring):
8457                                 out.write("%s\n" % (x,))
8458                                 continue
8459
8460                         myprint, verboseadd, repoadd = x
8461
8462                         if verboseadd:
8463                                 myprint += " " + verboseadd
8464
8465                         if show_repos and repoadd:
8466                                 myprint += " " + teal("[%s]" % repoadd)
8467
8468                         out.write("%s\n" % (myprint,))
8469
8470                 for x in blockers:
8471                         print x
8472
8473                 if verbosity == 3:
8474                         print
8475                         print counters
8476                         if show_repos:
8477                                 sys.stdout.write(str(repo_display))
8478
8479                 if "--changelog" in self.myopts:
8480                         print
8481                         for revision,text in changelogs:
8482                                 print bold('*'+revision)
8483                                 sys.stdout.write(text)
8484
8485                 sys.stdout.flush()
8486                 return os.EX_OK
8487
8488         def display_problems(self):
8489                 """
8490                 Display problems with the dependency graph such as slot collisions.
8491                 This is called internally by display() to show the problems _after_
8492                 the merge list where it is most likely to be seen, but if display()
8493                 is not going to be called then this method should be called explicitly
8494                 to ensure that the user is notified of problems with the graph.
8495
8496                 All output goes to stderr, except for unsatisfied dependencies which
8497                 go to stdout for parsing by programs such as autounmask.
8498                 """
8499
8500                 # Note that show_masked_packages() sends its output to
8501                 # stdout, and some programs such as autounmask parse the
8502                 # output in cases when emerge bails out. However, when
8503                 # show_masked_packages() is called for installed packages
8504                 # here, the message is a warning that is more appropriate
8505                 # to send to stderr, so temporarily redirect stdout to
8506                 # stderr. TODO: Fix output code so there's a cleaner way
8507                 # to redirect everything to stderr.
8508                 sys.stdout.flush()
8509                 sys.stderr.flush()
8510                 stdout = sys.stdout
8511                 try:
8512                         sys.stdout = sys.stderr
8513                         self._display_problems()
8514                 finally:
8515                         sys.stdout = stdout
8516                         sys.stdout.flush()
8517                         sys.stderr.flush()
8518
8519                 # This goes to stdout for parsing by programs like autounmask.
8520                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8521                         self._show_unsatisfied_dep(*pargs, **kwargs)
8522
8523         def _display_problems(self):
8524                 if self._circular_deps_for_display is not None:
8525                         self._show_circular_deps(
8526                                 self._circular_deps_for_display)
8527
8528                 # The user is only notified of a slot conflict if
8529                 # there are no unresolvable blocker conflicts.
8530                 if self._unsatisfied_blockers_for_display is not None:
8531                         self._show_unsatisfied_blockers(
8532                                 self._unsatisfied_blockers_for_display)
8533                 else:
8534                         self._show_slot_collision_notice()
8535
8536                 # TODO: Add generic support for "set problem" handlers so that
8537                 # the below warnings aren't special cases for world only.
8538
8539                 if self._missing_args:
8540                         world_problems = False
8541                         if "world" in self._sets:
8542                                 # Filter out indirect members of world (from nested sets)
8543                                 # since only direct members of world are desired here.
8544                                 world_set = self.roots[self.target_root].sets["world"]
8545                                 for arg, atom in self._missing_args:
8546                                         if arg.name == "world" and atom in world_set:
8547                                                 world_problems = True
8548                                                 break
8549
8550                         if world_problems:
8551                                 sys.stderr.write("\n!!! Problems have been " + \
8552                                         "detected with your world file\n")
8553                                 sys.stderr.write("!!! Please run " + \
8554                                         green("emaint --check world")+"\n\n")
8555
8556                 if self._missing_args:
8557                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8558                                 " Ebuilds for the following packages are either all\n")
8559                         sys.stderr.write(colorize("BAD", "!!!") + \
8560                                 " masked or don't exist:\n")
8561                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8562                                 self._missing_args) + "\n")
8563
8564                 if self._pprovided_args:
8565                         arg_refs = {}
8566                         for arg, atom in self._pprovided_args:
8567                                 if isinstance(arg, SetArg):
8568                                         parent = arg.name
8569                                         arg_atom = (atom, atom)
8570                                 else:
8571                                         parent = "args"
8572                                         arg_atom = (arg.arg, atom)
8573                                 refs = arg_refs.setdefault(arg_atom, [])
8574                                 if parent not in refs:
8575                                         refs.append(parent)
8576                         msg = []
8577                         msg.append(bad("\nWARNING: "))
8578                         if len(self._pprovided_args) > 1:
8579                                 msg.append("Requested packages will not be " + \
8580                                         "merged because they are listed in\n")
8581                         else:
8582                                 msg.append("A requested package will not be " + \
8583                                         "merged because it is listed in\n")
8584                         msg.append("package.provided:\n\n")
8585                         problems_sets = set()
8586                         for (arg, atom), refs in arg_refs.iteritems():
8587                                 ref_string = ""
8588                                 if refs:
8589                                         problems_sets.update(refs)
8590                                         refs.sort()
8591                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8592                                         ref_string = " pulled in by " + ref_string
8593                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8594                         msg.append("\n")
8595                         if "world" in problems_sets:
8596                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8597                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8598                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8599                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8600                                 msg.append("The best course of action depends on the reason that an offending\n")
8601                                 msg.append("package.provided entry exists.\n\n")
8602                         sys.stderr.write("".join(msg))
8603
8604                 masked_packages = []
8605                 for pkg in self._masked_installed:
8606                         root_config = pkg.root_config
8607                         pkgsettings = self.pkgsettings[pkg.root]
8608                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8609                         masked_packages.append((root_config, pkgsettings,
8610                                 pkg.cpv, pkg.metadata, mreasons))
8611                 if masked_packages:
8612                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8613                                 " The following installed packages are masked:\n")
8614                         show_masked_packages(masked_packages)
8615                         show_mask_docs()
8616                         print
8617
8618         def calc_changelog(self,ebuildpath,current,next):
8619                 if ebuildpath is None or not os.path.exists(ebuildpath):
8620                         return []
8621                 current = '-'.join(portage.catpkgsplit(current)[1:])
8622                 if current.endswith('-r0'):
8623                         current = current[:-3]
8624                 next = '-'.join(portage.catpkgsplit(next)[1:])
8625                 if next.endswith('-r0'):
8626                         next = next[:-3]
8627                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8628                 try:
8629                         changelog = open(changelogpath).read()
8630                 except SystemExit, e:
8631                         raise # re-raise so that SystemExit can propagate
8632                 except:
8633                         return []
8634                 divisions = self.find_changelog_tags(changelog)
8635                 #print 'XX from',current,'to',next
8636                 #for div,text in divisions: print 'XX',div
8637                 # skip entries for all revisions above the one we are about to emerge
8638                 for i in range(len(divisions)):
8639                         if divisions[i][0]==next:
8640                                 divisions = divisions[i:]
8641                                 break
8642                 # find out how many entries we are going to display
8643                 for i in range(len(divisions)):
8644                         if divisions[i][0]==current:
8645                                 divisions = divisions[:i]
8646                                 break
8647                 else:
8648                                 # couldn't find the current revision in the list; display nothing
8649                         return []
8650                 return divisions
8651
8652         def find_changelog_tags(self,changelog):
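                # Illustrative note (not part of the original code): given ChangeLog
                # text whose entries begin with header lines such as
                #   "*foo-1.2.3 (01 Jan 2009)"
                # this is expected to return a list of (version, entry_text) pairs,
                # e.g. [("foo-1.2.3", "...entry body...")], with trailing ".ebuild"
                # and "-r0" suffixes stripped from the version.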
8653                 divs = []
8654                 release = None
8655                 while 1:
8656                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8657                         if match is None:
8658                                 if release is not None:
8659                                         divs.append((release,changelog))
8660                                 return divs
8661                         if release is not None:
8662                                 divs.append((release,changelog[:match.start()]))
8663                         changelog = changelog[match.end():]
8664                         release = match.group(1)
8665                         if release.endswith('.ebuild'):
8666                                 release = release[:-7]
8667                         if release.endswith('-r0'):
8668                                 release = release[:-3]
8669
8670         def saveNomergeFavorites(self):
8671                 """Find atoms in favorites that are not in the mergelist and add them
8672                 to the world file if necessary."""
8673                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8674                         "--oneshot", "--onlydeps", "--pretend"):
8675                         if x in self.myopts:
8676                                 return
8677                 root_config = self.roots[self.target_root]
8678                 world_set = root_config.sets["world"]
8679
8680                 world_locked = False
8681                 if hasattr(world_set, "lock"):
8682                         world_set.lock()
8683                         world_locked = True
8684
8685                 if hasattr(world_set, "load"):
8686                         world_set.load() # maybe it's changed on disk
8687
8688                 args_set = self._sets["args"]
8689                 portdb = self.trees[self.target_root]["porttree"].dbapi
8690                 added_favorites = set()
8691                 for x in self._set_nodes:
8692                         pkg_type, root, pkg_key, pkg_status = x
8693                         if pkg_status != "nomerge":
8694                                 continue
8695
8696                         try:
8697                                 myfavkey = create_world_atom(x, args_set, root_config)
8698                                 if myfavkey:
8699                                         if myfavkey in added_favorites:
8700                                                 continue
8701                                         added_favorites.add(myfavkey)
8702                         except portage.exception.InvalidDependString, e:
8703                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8704                                         (pkg_key, str(e)), noiselevel=-1)
8705                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8706                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8707                                 del e
8708                 all_added = []
8709                 for k in self._sets:
8710                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8711                                 continue
8712                         s = SETPREFIX + k
8713                         if s in world_set:
8714                                 continue
8715                         all_added.append(SETPREFIX + k)
8716                 all_added.extend(added_favorites)
8717                 all_added.sort()
8718                 for a in all_added:
8719                         print ">>> Recording %s in \"world\" favorites file..." % \
8720                                 colorize("INFORM", str(a))
8721                 if all_added:
8722                         world_set.update(all_added)
8723
8724                 if world_locked:
8725                         world_set.unlock()
8726
8727         def loadResumeCommand(self, resume_data, skip_masked=False):
8728                 """
8729                 Add a resume command to the graph and validate it in the process.  This
8730                 will raise a PackageNotFound exception if a package is not available.
8731                 """
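                # Illustrative note (not part of the original code): resume_data is
                # expected to be a dict roughly of the form
                #   {"mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"], ...],
                #    "favorites": ["app-misc/foo", ...]}
                # where each mergelist entry is a 4-item (pkg_type, root, cpv, action)
                # list; entries of any other shape are skipped below.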
8732
8733                 if not isinstance(resume_data, dict):
8734                         return False
8735
8736                 mergelist = resume_data.get("mergelist")
8737                 if not isinstance(mergelist, list):
8738                         mergelist = []
8739
8740                 fakedb = self.mydbapi
8741                 trees = self.trees
8742                 serialized_tasks = []
8743                 masked_tasks = []
8744                 for x in mergelist:
8745                         if not (isinstance(x, list) and len(x) == 4):
8746                                 continue
8747                         pkg_type, myroot, pkg_key, action = x
8748                         if pkg_type not in self.pkg_tree_map:
8749                                 continue
8750                         if action != "merge":
8751                                 continue
8752                         tree_type = self.pkg_tree_map[pkg_type]
8753                         mydb = trees[myroot][tree_type].dbapi
8754                         db_keys = list(self._trees_orig[myroot][
8755                                 tree_type].dbapi._aux_cache_keys)
8756                         try:
8757                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8758                         except KeyError:
8759                                 # It does not exist or it is corrupt.
8760                                 if action == "uninstall":
8761                                         continue
8762                                 raise portage.exception.PackageNotFound(pkg_key)
8763                         installed = action == "uninstall"
8764                         built = pkg_type != "ebuild"
8765                         root_config = self.roots[myroot]
8766                         pkg = Package(built=built, cpv=pkg_key,
8767                                 installed=installed, metadata=metadata,
8768                                 operation=action, root_config=root_config,
8769                                 type_name=pkg_type)
8770                         if pkg_type == "ebuild":
8771                                 pkgsettings = self.pkgsettings[myroot]
8772                                 pkgsettings.setcpv(pkg)
8773                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8774                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8775                         self._pkg_cache[pkg] = pkg
8776
8777                         root_config = self.roots[pkg.root]
8778                         if "merge" == pkg.operation and \
8779                                 not visible(root_config.settings, pkg):
8780                                 if skip_masked:
8781                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8782                                 else:
8783                                         self._unsatisfied_deps_for_display.append(
8784                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8785
8786                         fakedb[myroot].cpv_inject(pkg)
8787                         serialized_tasks.append(pkg)
8788                         self.spinner.update()
8789
8790                 if self._unsatisfied_deps_for_display:
8791                         return False
8792
8793                 if not serialized_tasks or "--nodeps" in self.myopts:
8794                         self._serialized_tasks_cache = serialized_tasks
8795                         self._scheduler_graph = self.digraph
8796                 else:
8797                         self._select_package = self._select_pkg_from_graph
8798                         self.myparams.add("selective")
8799                         # Always traverse deep dependencies in order to account for
8800                         # potentially unsatisfied dependencies of installed packages.
8801                         # This is necessary for correct --keep-going or --resume operation
8802                         # in case a package from a group of circularly dependent packages
8803                         # fails. In this case, a package which has recently been installed
8804                         # may have an unsatisfied circular dependency (pulled in by
8805                         # PDEPEND, for example). So, even though a package is already
8806                         # installed, it may not have all of its dependencies satisfied, so
8807                         # it may not be usable. If such a package is in the subgraph of
8808                         # deep dependencies of a scheduled build, that build needs to
8809                         # be cancelled. In order for this type of situation to be
8810                         # recognized, deep traversal of dependencies is required.
8811                         self.myparams.add("deep")
8812
8813                         favorites = resume_data.get("favorites")
8814                         args_set = self._sets["args"]
8815                         if isinstance(favorites, list):
8816                                 args = self._load_favorites(favorites)
8817                         else:
8818                                 args = []
8819
8820                         for task in serialized_tasks:
8821                                 if isinstance(task, Package) and \
8822                                         task.operation == "merge":
8823                                         if not self._add_pkg(task, None):
8824                                                 return False
8825
8826                         # Packages for argument atoms need to be explicitly
8827                         # added via _add_pkg() so that they are included in the
8828                         # digraph (needed at least for --tree display).
8829                         for arg in args:
8830                                 for atom in arg.set:
8831                                         pkg, existing_node = self._select_package(
8832                                                 arg.root_config.root, atom)
8833                                         if existing_node is None and \
8834                                                 pkg is not None:
8835                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8836                                                         root=pkg.root, parent=arg)):
8837                                                         return False
8838
8839                         # Allow unsatisfied deps here to avoid showing a masking
8840                         # message for an unsatisfied dep that isn't necessarily
8841                         # masked.
8842                         if not self._create_graph(allow_unsatisfied=True):
8843                                 return False
8844
8845                         unsatisfied_deps = []
8846                         for dep in self._unsatisfied_deps:
8847                                 if not isinstance(dep.parent, Package):
8848                                         continue
8849                                 if dep.parent.operation == "merge":
8850                                         unsatisfied_deps.append(dep)
8851                                         continue
8852
8853                                 # For unsatisfied deps of installed packages, only account for
8854                                 # them if they are in the subgraph of dependencies of a package
8855                                 # which is scheduled to be installed.
8856                                 unsatisfied_install = False
8857                                 traversed = set()
8858                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8859                                 while dep_stack:
8860                                         node = dep_stack.pop()
8861                                         if not isinstance(node, Package):
8862                                                 continue
8863                                         if node.operation == "merge":
8864                                                 unsatisfied_install = True
8865                                                 break
8866                                         if node in traversed:
8867                                                 continue
8868                                         traversed.add(node)
8869                                         dep_stack.extend(self.digraph.parent_nodes(node))
8870
8871                                 if unsatisfied_install:
8872                                         unsatisfied_deps.append(dep)
8873
8874                         if masked_tasks or unsatisfied_deps:
8875                                 # This probably means that a required package
8876                                 # was dropped via --skipfirst. It makes the
8877                                 # resume list invalid, so convert it to a
8878                                 # UnsatisfiedResumeDep exception.
8879                                 raise self.UnsatisfiedResumeDep(self,
8880                                         masked_tasks + unsatisfied_deps)
8881                         self._serialized_tasks_cache = None
8882                         try:
8883                                 self.altlist()
8884                         except self._unknown_internal_error:
8885                                 return False
8886
8887                 return True
8888
8889         def _load_favorites(self, favorites):
8890                 """
8891                 Use a list of favorites to resume state from a
8892                 previous select_files() call. This creates similar
8893                 DependencyArg instances to those that would have
8894                 been created by the original select_files() call.
8895                 This allows Package instances to be matched with
8896                 DependencyArg instances during graph creation.
8897                 """
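                # Illustrative note (not part of the original code): favorites is a
                # list of strings, e.g. ["world", "app-misc/foo", SETPREFIX + "myset"];
                # bare "system"/"world" names are normalized to set references below,
                # set references become SetArg instances, valid atoms become AtomArg
                # instances, and anything else is silently skipped.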
8898                 root_config = self.roots[self.target_root]
8899                 getSetAtoms = root_config.setconfig.getSetAtoms
8900                 sets = root_config.sets
8901                 args = []
8902                 for x in favorites:
8903                         if not isinstance(x, basestring):
8904                                 continue
8905                         if x in ("system", "world"):
8906                                 x = SETPREFIX + x
8907                         if x.startswith(SETPREFIX):
8908                                 s = x[len(SETPREFIX):]
8909                                 if s not in sets:
8910                                         continue
8911                                 if s in self._sets:
8912                                         continue
8913                                 # Recursively expand sets so that containment tests in
8914                                 # self._get_parent_sets() properly match atoms in nested
8915                                 # sets (like if world contains system).
8916                                 expanded_set = InternalPackageSet(
8917                                         initial_atoms=getSetAtoms(s))
8918                                 self._sets[s] = expanded_set
8919                                 args.append(SetArg(arg=x, set=expanded_set,
8920                                         root_config=root_config))
8921                         else:
8922                                 if not portage.isvalidatom(x):
8923                                         continue
8924                                 args.append(AtomArg(arg=x, atom=x,
8925                                         root_config=root_config))
8926
8927                 self._set_args(args)
8928                 return args
8929
8930         class UnsatisfiedResumeDep(portage.exception.PortageException):
8931                 """
8932                 A dependency of a resume list is not installed. This
8933                 can occur when a required package is dropped from the
8934                 merge list via --skipfirst.
8935                 """
8936                 def __init__(self, depgraph, value):
8937                         portage.exception.PortageException.__init__(self, value)
8938                         self.depgraph = depgraph
8939
8940         class _internal_exception(portage.exception.PortageException):
8941                 def __init__(self, value=""):
8942                         portage.exception.PortageException.__init__(self, value)
8943
8944         class _unknown_internal_error(_internal_exception):
8945                 """
8946                 Used by the depgraph internally to terminate graph creation.
8947                 The specific reason for the failure should have been dumped
8948                 to stderr; unfortunately, the exact reason for the failure
8949                 may not be known.
8950                 """
8951
8952         class _serialize_tasks_retry(_internal_exception):
8953                 """
8954                 This is raised by the _serialize_tasks() method when it needs to
8955                 be called again for some reason. The only case that it's currently
8956                 used for is when neglected dependencies need to be added to the
8957                 graph in order to avoid making a potentially unsafe decision.
8958                 """
8959
8960         class _dep_check_composite_db(portage.dbapi):
8961                 """
8962                 A dbapi-like interface that is optimized for use in dep_check() calls.
8963                 This is built on top of the existing depgraph package selection logic.
8964                 Some packages that have been added to the graph may be masked from this
8965                 view in order to influence the atom preference selection that occurs
8966                 via dep_check().
8967                 """
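                # Illustrative note (not part of the original code): dep_check() calls
                # match(atom) on this object and expects a list of matching cpv
                # strings; here those matches come from the depgraph's own package
                # selection (and visibility filtering) rather than from an on-disk
                # package database.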
8968                 def __init__(self, depgraph, root):
8969                         portage.dbapi.__init__(self)
8970                         self._depgraph = depgraph
8971                         self._root = root
8972                         self._match_cache = {}
8973                         self._cpv_pkg_map = {}
8974
8975                 def _clear_cache(self):
8976                         self._match_cache.clear()
8977                         self._cpv_pkg_map.clear()
8978
8979                 def match(self, atom):
8980                         ret = self._match_cache.get(atom)
8981                         if ret is not None:
8982                                 return ret[:]
8983                         orig_atom = atom
8984                         if "/" not in atom:
8985                                 atom = self._dep_expand(atom)
8986                         pkg, existing = self._depgraph._select_package(self._root, atom)
8987                         if not pkg:
8988                                 ret = []
8989                         else:
8990                                 # Return the highest available from select_package() as well as
8991                                 # any matching slots in the graph db.
8992                                 slots = set()
8993                                 slots.add(pkg.metadata["SLOT"])
8994                                 atom_cp = portage.dep_getkey(atom)
8995                                 if pkg.cp.startswith("virtual/"):
8996                                         # For new-style virtual lookahead that occurs inside
8997                                         # dep_check(), examine all slots. This is needed
8998                                         # so that newer slots will not unnecessarily be pulled in
8999                                         # when a satisfying lower slot is already installed. For
9000                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9001                                         # there's no need to pull in a newer slot to satisfy a
9002                                         # virtual/jdk dependency.
9003                                         for db, pkg_type, built, installed, db_keys in \
9004                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9005                                                 for cpv in db.match(atom):
9006                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9007                                                                 continue
9008                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9009                                 ret = []
9010                                 if self._visible(pkg):
9011                                         self._cpv_pkg_map[pkg.cpv] = pkg
9012                                         ret.append(pkg.cpv)
9013                                 slots.remove(pkg.metadata["SLOT"])
9014                                 while slots:
9015                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9016                                         pkg, existing = self._depgraph._select_package(
9017                                                 self._root, slot_atom)
9018                                         if not pkg:
9019                                                 continue
9020                                         if not self._visible(pkg):
9021                                                 continue
9022                                         self._cpv_pkg_map[pkg.cpv] = pkg
9023                                         ret.append(pkg.cpv)
9024                                 if ret:
9025                                         self._cpv_sort_ascending(ret)
9026                         self._match_cache[orig_atom] = ret
9027                         return ret[:]
9028
9029                 def _visible(self, pkg):
9030                         if pkg.installed and "selective" not in self._depgraph.myparams:
9031                                 try:
9032                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9033                                 except (StopIteration, portage.exception.InvalidDependString):
9034                                         arg = None
9035                                 if arg:
9036                                         return False
9037                         if pkg.installed:
9038                                 try:
9039                                         if not visible(
9040                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9041                                                 return False
9042                                 except portage.exception.InvalidDependString:
9043                                         pass
9044                         in_graph = self._depgraph._slot_pkg_map[
9045                                 self._root].get(pkg.slot_atom)
9046                         if in_graph is None:
9047                                 # Mask choices for packages which are not the highest visible
9048                                 # version within their slot (since they usually trigger slot
9049                                 # conflicts).
9050                                 highest_visible, in_graph = self._depgraph._select_package(
9051                                         self._root, pkg.slot_atom)
9052                                 if pkg != highest_visible:
9053                                         return False
9054                         elif in_graph != pkg:
9055                                 # Mask choices for packages that would trigger a slot
9056                                 # conflict with a previously selected package.
9057                                 return False
9058                         return True
9059
9060                 def _dep_expand(self, atom):
9061                         """
9062                         This is only needed for old installed packages that may
9063                         contain atoms that are not fully qualified with a specific
9064                         category. Emulate the cpv_expand() function that's used by
9065                         dbapi.match() in cases like this. If there are multiple
9066                         matches, it's often due to a new-style virtual that has
9067                         been added, so try to filter those out to avoid raising
9068                         a ValueError.
9069                         """
9070                         root_config = self._depgraph.roots[self._root]
9071                         orig_atom = atom
9072                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9073                         if len(expanded_atoms) > 1:
9074                                 non_virtual_atoms = []
9075                                 for x in expanded_atoms:
9076                                         if not portage.dep_getkey(x).startswith("virtual/"):
9077                                                 non_virtual_atoms.append(x)
9078                                 if len(non_virtual_atoms) == 1:
9079                                         expanded_atoms = non_virtual_atoms
9080                         if len(expanded_atoms) > 1:
9081                                 # compatible with portage.cpv_expand()
9082                                 raise portage.exception.AmbiguousPackageName(
9083                                         [portage.dep_getkey(x) for x in expanded_atoms])
9084                         if expanded_atoms:
9085                                 atom = expanded_atoms[0]
9086                         else:
9087                                 null_atom = insert_category_into_atom(atom, "null")
9088                                 null_cp = portage.dep_getkey(null_atom)
9089                                 cat, atom_pn = portage.catsplit(null_cp)
9090                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9091                                 if virts_p:
9092                                         # Allow the resolver to choose which virtual.
9093                                         atom = insert_category_into_atom(atom, "virtual")
9094                                 else:
9095                                         atom = insert_category_into_atom(atom, "null")
9096                         return atom
9097
9098                 def aux_get(self, cpv, wants):
9099                         metadata = self._cpv_pkg_map[cpv].metadata
9100                         return [metadata.get(x, "") for x in wants]
9101
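# Illustrative sketch (not part of the original module): the depgraph
# instantiates _dep_check_composite_db per root and hands it to dep_check()
# in place of a real dbapi, so atom selection sees the graph's package
# choices. The variable names below are hypothetical.
#
#     composite_db = depgraph._dep_check_composite_db(mydepgraph, myroot)
#     cpv_list = composite_db.match("dev-lang/python")
#     if cpv_list:
#             slot, = composite_db.aux_get(cpv_list[-1], ["SLOT"])
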
9102 class RepoDisplay(object):
9103         def __init__(self, roots):
9104                 self._shown_repos = {}
9105                 self._unknown_repo = False
9106                 repo_paths = set()
9107                 for root_config in roots.itervalues():
9108                         portdir = root_config.settings.get("PORTDIR")
9109                         if portdir:
9110                                 repo_paths.add(portdir)
9111                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9112                         if overlays:
9113                                 repo_paths.update(overlays.split())
9114                 repo_paths = list(repo_paths)
9115                 self._repo_paths = repo_paths
9116                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9117                         for repo_path in repo_paths ]
9118
9119                 # pre-allocate index for PORTDIR so that it always has index 0.
9120                 for root_config in roots.itervalues():
9121                         portdb = root_config.trees["porttree"].dbapi
9122                         portdir = portdb.porttree_root
9123                         if portdir:
9124                                 self.repoStr(portdir)
9125
9126         def repoStr(self, repo_path_real):
9127                 real_index = -1
9128                 if repo_path_real in self._repo_paths_real:
9129                         real_index = self._repo_paths_real.index(repo_path_real)
9130                 if real_index == -1:
9131                         s = "?"
9132                         self._unknown_repo = True
9133                 else:
9134                         shown_repos = self._shown_repos
9135                         repo_paths = self._repo_paths
9136                         repo_path = repo_paths[real_index]
9137                         index = shown_repos.get(repo_path)
9138                         if index is None:
9139                                 index = len(shown_repos)
9140                                 shown_repos[repo_path] = index
9141                         s = str(index)
9142                 return s
9143
9144         def __str__(self):
9145                 output = []
9146                 shown_repos = self._shown_repos
9147                 unknown_repo = self._unknown_repo
9148                 if shown_repos or self._unknown_repo:
9149                         output.append("Portage tree and overlays:\n")
9150                 show_repo_paths = list(shown_repos)
9151                 for repo_path, repo_index in shown_repos.iteritems():
9152                         show_repo_paths[repo_index] = repo_path
9153                 if show_repo_paths:
9154                         for index, repo_path in enumerate(show_repo_paths):
9155                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9156                 if unknown_repo:
9157                         output.append(" "+teal("[?]") + \
9158                                 " indicates that the source repository could not be determined\n")
9159                 return "".join(output)
9160
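# Illustrative sketch (not part of the original module): RepoDisplay maps
# repository paths to stable display indices so the merge list can tag each
# package with "[N]". "roots" is assumed to be the usual root_config mapping
# that emerge builds; "repo_path" is any PORTDIR or overlay path.
def _example_repo_display(roots, repo_path):
        repo_display = RepoDisplay(roots)
        index_str = repo_display.repoStr(os.path.realpath(repo_path))
        legend = str(repo_display)
        return index_str, legend
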
9161 class PackageCounters(object):
9162
9163         def __init__(self):
9164                 self.upgrades   = 0
9165                 self.downgrades = 0
9166                 self.new        = 0
9167                 self.newslot    = 0
9168                 self.reinst     = 0
9169                 self.uninst     = 0
9170                 self.blocks     = 0
9171                 self.blocks_satisfied         = 0
9172                 self.totalsize  = 0
9173                 self.restrict_fetch           = 0
9174                 self.restrict_fetch_satisfied = 0
9175                 self.interactive              = 0
9176
9177         def __str__(self):
9178                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9179                 myoutput = []
9180                 details = []
9181                 myoutput.append("Total: %s package" % total_installs)
9182                 if total_installs != 1:
9183                         myoutput.append("s")
9184                 if total_installs != 0:
9185                         myoutput.append(" (")
9186                 if self.upgrades > 0:
9187                         details.append("%s upgrade" % self.upgrades)
9188                         if self.upgrades > 1:
9189                                 details[-1] += "s"
9190                 if self.downgrades > 0:
9191                         details.append("%s downgrade" % self.downgrades)
9192                         if self.downgrades > 1:
9193                                 details[-1] += "s"
9194                 if self.new > 0:
9195                         details.append("%s new" % self.new)
9196                 if self.newslot > 0:
9197                         details.append("%s in new slot" % self.newslot)
9198                         if self.newslot > 1:
9199                                 details[-1] += "s"
9200                 if self.reinst > 0:
9201                         details.append("%s reinstall" % self.reinst)
9202                         if self.reinst > 1:
9203                                 details[-1] += "s"
9204                 if self.uninst > 0:
9205                         details.append("%s uninstall" % self.uninst)
9206                         if self.uninst > 1:
9207                                 details[-1] += "s"
9208                 if self.interactive > 0:
9209                         details.append("%s %s" % (self.interactive,
9210                                 colorize("WARN", "interactive")))
9211                 myoutput.append(", ".join(details))
9212                 if total_installs != 0:
9213                         myoutput.append(")")
9214                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9215                 if self.restrict_fetch:
9216                         myoutput.append("\nFetch Restriction: %s package" % \
9217                                 self.restrict_fetch)
9218                         if self.restrict_fetch > 1:
9219                                 myoutput.append("s")
9220                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9221                         myoutput.append(bad(" (%s unsatisfied)") % \
9222                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9223                 if self.blocks > 0:
9224                         myoutput.append("\nConflict: %s block" % \
9225                                 self.blocks)
9226                         if self.blocks > 1:
9227                                 myoutput.append("s")
9228                         if self.blocks_satisfied < self.blocks:
9229                                 myoutput.append(bad(" (%s unsatisfied)") % \
9230                                         (self.blocks - self.blocks_satisfied))
9231                 return "".join(myoutput)
9232
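# Illustrative sketch (not part of the original module): PackageCounters is
# a plain accumulator; display code increments the counters while walking
# the merge list and then prints the summary via str().
def _example_package_counters():
        counters = PackageCounters()
        counters.upgrades += 2
        counters.new += 1
        counters.totalsize += 5 * 1024 * 1024
        # e.g. "Total: 3 packages (2 upgrades, 1 new), Size of downloads: 5,120 kB"
        return str(counters)
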
9233 class PollSelectAdapter(PollConstants):
9234
9235         """
9236         Use select.select() to emulate a poll object on
9237         systems that don't support poll().
9238         """
9239
9240         def __init__(self):
9241                 self._registered = {}
9242                 self._select_args = [[], [], []]
9243
9244         def register(self, fd, *args):
9245                 """
9246                 Only POLLIN is currently supported!
9247                 """
9248                 if len(args) > 1:
9249                         raise TypeError(
9250                                 "register expected at most 2 arguments, got " + \
9251                                 repr(1 + len(args)))
9252
9253                 eventmask = PollConstants.POLLIN | \
9254                         PollConstants.POLLPRI | PollConstants.POLLOUT
9255                 if args:
9256                         eventmask = args[0]
9257
9258                 self._registered[fd] = eventmask
9259                 self._select_args = None
9260
9261         def unregister(self, fd):
9262                 self._select_args = None
9263                 del self._registered[fd]
9264
9265         def poll(self, *args):
9266                 if len(args) > 1:
9267                         raise TypeError(
9268                                 "poll expected at most 2 arguments, got " + \
9269                                 repr(1 + len(args)))
9270
9271                 timeout = None
9272                 if args:
9273                         timeout = args[0]
9274
9275                 select_args = self._select_args
9276                 if select_args is None:
9277                         select_args = [self._registered.keys(), [], []]
9278
9279                 if timeout is not None:
9280                         select_args = select_args[:]
9281                         # Translate poll() timeout args to select() timeout args:
9282                         #
9283                         #          | units        | value(s) for indefinite block
9284                         # ---------|--------------|------------------------------
9285                         #   poll   | milliseconds | omitted, negative, or None
9286                         # ---------|--------------|------------------------------
9287                         #   select | seconds      | omitted
9288                         # ---------|--------------|------------------------------
9289
9290                         if timeout is not None and timeout < 0:
9291                                 timeout = None
9292                         if timeout is not None:
9293                                 select_args.append(float(timeout) / 1000)  # avoid integer-division truncation
9294
9295                 select_events = select.select(*select_args)
9296                 poll_events = []
9297                 for fd in select_events[0]:
9298                         poll_events.append((fd, PollConstants.POLLIN))
9299                 return poll_events
9300
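# Illustrative sketch (not part of the original module): PollSelectAdapter
# mirrors the select.poll() interface, so create_poll_instance() below can
# return either one transparently. The pipe exists only for demonstration.
def _example_poll_select_adapter():
        adapter = PollSelectAdapter()
        pr, pw = os.pipe()
        adapter.register(pr, PollConstants.POLLIN)
        os.write(pw, "x")
        events = adapter.poll(1000)  # timeout in milliseconds, as with poll()
        os.close(pw)
        os.close(pr)
        return events  # [(fd, PollConstants.POLLIN)]
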
9301 class SequentialTaskQueue(SlotObject):
9302
9303         __slots__ = ("max_jobs", "running_tasks") + \
9304                 ("_dirty", "_scheduling", "_task_queue")
9305
9306         def __init__(self, **kwargs):
9307                 SlotObject.__init__(self, **kwargs)
9308                 self._task_queue = deque()
9309                 self.running_tasks = set()
9310                 if self.max_jobs is None:
9311                         self.max_jobs = 1
9312                 self._dirty = True
9313
9314         def add(self, task):
9315                 self._task_queue.append(task)
9316                 self._dirty = True
9317
9318         def addFront(self, task):
9319                 self._task_queue.appendleft(task)
9320                 self._dirty = True
9321
9322         def schedule(self):
9323
9324                 if not self._dirty:
9325                         return False
9326
9327                 if not self:
9328                         return False
9329
9330                 if self._scheduling:
9331                         # Ignore any recursive schedule() calls triggered via
9332                         # self._task_exit().
9333                         return False
9334
9335                 self._scheduling = True
9336
9337                 task_queue = self._task_queue
9338                 running_tasks = self.running_tasks
9339                 max_jobs = self.max_jobs
9340                 state_changed = False
9341
9342                 while task_queue and \
9343                         (max_jobs is True or len(running_tasks) < max_jobs):
9344                         task = task_queue.popleft()
9345                         cancelled = getattr(task, "cancelled", None)
9346                         if not cancelled:
9347                                 running_tasks.add(task)
9348                                 task.addExitListener(self._task_exit)
9349                                 task.start()
9350                         state_changed = True
9351
9352                 self._dirty = False
9353                 self._scheduling = False
9354
9355                 return state_changed
9356
9357         def _task_exit(self, task):
9358                 """
9359                 Since we can always rely on exit listeners being called, the set of
9360                 running tasks is always pruned automatically and there is never any need
9361                 to actively prune it.
9362                 """
9363                 self.running_tasks.remove(task)
9364                 if self._task_queue:
9365                         self._dirty = True
9366
9367         def clear(self):
9368                 self._task_queue.clear()
9369                 running_tasks = self.running_tasks
9370                 while running_tasks:
9371                         task = running_tasks.pop()
9372                         task.removeExitListener(self._task_exit)
9373                         task.cancel()
9374                 self._dirty = False
9375
9376         def __nonzero__(self):
9377                 return bool(self._task_queue or self.running_tasks)
9378
9379         def __len__(self):
9380                 return len(self._task_queue) + len(self.running_tasks)
9381
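# Illustrative sketch (not part of the original module): any object that
# provides start(), cancelled, addExitListener() and removeExitListener()
# can be queued. The _ExampleSyncTask class below is hypothetical and
# finishes synchronously inside start().
class _ExampleSyncTask(object):
        def __init__(self):
                self.cancelled = False
                self._exit_listeners = []
        def addExitListener(self, listener):
                self._exit_listeners.append(listener)
        def removeExitListener(self, listener):
                self._exit_listeners.remove(listener)
        def start(self):
                # A real task would run asynchronously and notify the
                # listeners when it exits.
                for listener in self._exit_listeners:
                        listener(self)

def _example_sequential_task_queue():
        queue = SequentialTaskQueue(max_jobs=2)
        for i in range(4):
                queue.add(_ExampleSyncTask())
        queue.schedule()  # the synchronous tasks run immediately
        return len(queue)  # 0 once everything has finished
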
9382 _can_poll_device = None
9383
9384 def can_poll_device():
9385         """
9386         Test if it's possible to use poll() on a device such as a pty. This
9387         is known to fail on Darwin.
9388         @rtype: bool
9389         @returns: True if poll() on a device succeeds, False otherwise.
9390         """
9391
9392         global _can_poll_device
9393         if _can_poll_device is not None:
9394                 return _can_poll_device
9395
9396         if not hasattr(select, "poll"):
9397                 _can_poll_device = False
9398                 return _can_poll_device
9399
9400         try:
9401                 dev_null = open('/dev/null', 'rb')
9402         except IOError:
9403                 _can_poll_device = False
9404                 return _can_poll_device
9405
9406         p = select.poll()
9407         p.register(dev_null.fileno(), PollConstants.POLLIN)
9408
9409         invalid_request = False
9410         for f, event in p.poll():
9411                 if event & PollConstants.POLLNVAL:
9412                         invalid_request = True
9413                         break
9414         dev_null.close()
9415
9416         _can_poll_device = not invalid_request
9417         return _can_poll_device
9418
9419 def create_poll_instance():
9420         """
9421         Create an instance of select.poll, or an instance of
9422         PollSelectAdapter if there is no poll() implementation or
9423         it is broken somehow.
9424         """
9425         if can_poll_device():
9426                 return select.poll()
9427         return PollSelectAdapter()
9428
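# Illustrative sketch (not part of the original module): callers such as
# PollScheduler.__init__() below use this factory rather than select.poll()
# directly, so the Darwin/pty fallback is picked up automatically. The fd
# in this snippet is hypothetical.
#
#     poll_obj = create_poll_instance()
#     poll_obj.register(fd, PollConstants.POLLIN)
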
9429 getloadavg = getattr(os, "getloadavg", None)
9430 if getloadavg is None:
9431         def getloadavg():
9432                 """
9433                 Uses /proc/loadavg to emulate os.getloadavg().
9434                 Raises OSError if the load average was unobtainable.
9435                 """
9436                 try:
9437                         loadavg_str = open('/proc/loadavg').readline()
9438                 except IOError:
9439                         # getloadavg() is only supposed to raise OSError, so convert
9440                         raise OSError('unknown')
9441                 loadavg_split = loadavg_str.split()
9442                 if len(loadavg_split) < 3:
9443                         raise OSError('unknown')
9444                 loadavg_floats = []
9445                 for i in xrange(3):
9446                         try:
9447                                 loadavg_floats.append(float(loadavg_split[i]))
9448                         except ValueError:
9449                                 raise OSError('unknown')
9450                 return tuple(loadavg_floats)
9451
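# Illustrative sketch (not part of the original module): consumers treat
# OSError from getloadavg() as "load unknown", as _can_add_job() and
# JobStatusDisplay._load_avg_str() do below.
def _example_load_below(max_load):
        try:
                avg1, avg5, avg15 = getloadavg()
        except OSError:
                return False
        return avg1 < max_load
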
9452 class PollScheduler(object):
9453
9454         class _sched_iface_class(SlotObject):
9455                 __slots__ = ("register", "schedule", "unregister")
9456
9457         def __init__(self):
9458                 self._max_jobs = 1
9459                 self._max_load = None
9460                 self._jobs = 0
9461                 self._poll_event_queue = []
9462                 self._poll_event_handlers = {}
9463                 self._poll_event_handler_ids = {}
9464                 # Increment id for each new handler.
9465                 self._event_handler_id = 0
9466                 self._poll_obj = create_poll_instance()
9467                 self._scheduling = False
9468
9469         def _schedule(self):
9470                 """
9471                 Calls _schedule_tasks() and automatically returns early from
9472                 any recursive calls to this method that the _schedule_tasks()
9473                 call might trigger. This makes _schedule() safe to call from
9474                 inside exit listeners.
9475                 """
9476                 if self._scheduling:
9477                         return False
9478                 self._scheduling = True
9479                 try:
9480                         return self._schedule_tasks()
9481                 finally:
9482                         self._scheduling = False
9483
9484         def _running_job_count(self):
9485                 return self._jobs
9486
9487         def _can_add_job(self):
9488                 max_jobs = self._max_jobs
9489                 max_load = self._max_load
9490
9491                 if self._max_jobs is not True and \
9492                         self._running_job_count() >= self._max_jobs:
9493                         return False
9494
9495                 if max_load is not None and \
9496                         (max_jobs is True or max_jobs > 1) and \
9497                         self._running_job_count() >= 1:
9498                         try:
9499                                 avg1, avg5, avg15 = getloadavg()
9500                         except OSError:
9501                                 return False
9502
9503                         if avg1 >= max_load:
9504                                 return False
9505
9506                 return True
9507
9508         def _poll(self, timeout=None):
9509                 """
9510                 All poll() calls pass through here. The poll events
9511                 are added directly to self._poll_event_queue.
9512                 In order to avoid endless blocking, this raises
9513                 StopIteration if timeout is None and there are
9514                 no file descriptors to poll.
9515                 """
9516                 if not self._poll_event_handlers:
9517                         self._schedule()
9518                         if timeout is None and \
9519                                 not self._poll_event_handlers:
9520                                 raise StopIteration(
9521                                         "timeout is None and there are no poll() event handlers")
9522
9523                 # The following error is known to occur with Linux kernel versions
9524                 # less than 2.6.24:
9525                 #
9526                 #   select.error: (4, 'Interrupted system call')
9527                 #
9528                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9529                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9530                 # without any events.
9531                 while True:
9532                         try:
9533                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9534                                 break
9535                         except select.error, e:
9536                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9537                                         level=logging.ERROR, noiselevel=-1)
9538                                 del e
9539                                 if timeout is not None:
9540                                         break
9541
9542         def _next_poll_event(self, timeout=None):
9543                 """
9544                 Since the _schedule_wait() loop is called by event
9545                 handlers from _poll_loop(), maintain a central event
9546                 queue for both of them to share events from a single
9547                 poll() call. In order to avoid endless blocking, this
9548                 raises StopIteration if timeout is None and there are
9549                 no file descriptors to poll.
9550                 """
9551                 if not self._poll_event_queue:
9552                         self._poll(timeout)
9553                 return self._poll_event_queue.pop()
9554
9555         def _poll_loop(self):
9556
9557                 event_handlers = self._poll_event_handlers
9558                 event_handled = False
9559
9560                 try:
9561                         while event_handlers:
9562                                 f, event = self._next_poll_event()
9563                                 handler, reg_id = event_handlers[f]
9564                                 handler(f, event)
9565                                 event_handled = True
9566                 except StopIteration:
9567                         event_handled = True
9568
9569                 if not event_handled:
9570                         raise AssertionError("tight loop")
9571
9572         def _schedule_yield(self):
9573                 """
9574                 Schedule for a short period of time chosen by the scheduler based
9575                 on internal state. Synchronous tasks should call this periodically
9576                 in order to allow the scheduler to service pending poll events. The
9577                 scheduler will call poll() exactly once, without blocking, and any
9578                 resulting poll events will be serviced.
9579                 """
9580                 event_handlers = self._poll_event_handlers
9581                 events_handled = 0
9582
9583                 if not event_handlers:
9584                         return bool(events_handled)
9585
9586                 if not self._poll_event_queue:
9587                         self._poll(0)
9588
9589                 try:
9590                         while event_handlers and self._poll_event_queue:
9591                                 f, event = self._next_poll_event()
9592                                 handler, reg_id = event_handlers[f]
9593                                 handler(f, event)
9594                                 events_handled += 1
9595                 except StopIteration:
9596                         events_handled += 1
9597
9598                 return bool(events_handled)
9599
9600         def _register(self, f, eventmask, handler):
9601                 """
9602                 @rtype: Integer
9603                 @return: A unique registration id, for use in schedule() or
9604                         unregister() calls.
9605                 """
9606                 if f in self._poll_event_handlers:
9607                         raise AssertionError("fd %d is already registered" % f)
9608                 self._event_handler_id += 1
9609                 reg_id = self._event_handler_id
9610                 self._poll_event_handler_ids[reg_id] = f
9611                 self._poll_event_handlers[f] = (handler, reg_id)
9612                 self._poll_obj.register(f, eventmask)
9613                 return reg_id
9614
9615         def _unregister(self, reg_id):
9616                 f = self._poll_event_handler_ids[reg_id]
9617                 self._poll_obj.unregister(f)
9618                 del self._poll_event_handlers[f]
9619                 del self._poll_event_handler_ids[reg_id]
9620
9621         def _schedule_wait(self, wait_ids):
9622                 """
9623                 Schedule until the given wait_ids are no longer registered
9624                 for poll() events.
9625                 @type wait_ids: int or collection of ints
9626                 @param wait_ids: registration id(s) to wait for
9627                 """
9628                 event_handlers = self._poll_event_handlers
9629                 handler_ids = self._poll_event_handler_ids
9630                 event_handled = False
9631
9632                 if isinstance(wait_ids, int):
9633                         wait_ids = frozenset([wait_ids])
9634
9635                 try:
9636                         while wait_ids.intersection(handler_ids):
9637                                 f, event = self._next_poll_event()
9638                                 handler, reg_id = event_handlers[f]
9639                                 handler(f, event)
9640                                 event_handled = True
9641                 except StopIteration:
9642                         event_handled = True
9643
9644                 return event_handled
9645
9646 class QueueScheduler(PollScheduler):
9647
9648         """
9649         Add instances of SequentialTaskQueue and then call run(). The
9650         run() method returns when no tasks remain.
9651         """
9652
9653         def __init__(self, max_jobs=None, max_load=None):
9654                 PollScheduler.__init__(self)
9655
9656                 if max_jobs is None:
9657                         max_jobs = 1
9658
9659                 self._max_jobs = max_jobs
9660                 self._max_load = max_load
9661                 self.sched_iface = self._sched_iface_class(
9662                         register=self._register,
9663                         schedule=self._schedule_wait,
9664                         unregister=self._unregister)
9665
9666                 self._queues = []
9667                 self._schedule_listeners = []
9668
9669         def add(self, q):
9670                 self._queues.append(q)
9671
9672         def remove(self, q):
9673                 self._queues.remove(q)
9674
9675         def run(self):
9676
9677                 while self._schedule():
9678                         self._poll_loop()
9679
9680                 while self._running_job_count():
9681                         self._poll_loop()
9682
9683         def _schedule_tasks(self):
9684                 """
9685                 @rtype: bool
9686                 @returns: True if there may be remaining tasks to schedule,
9687                         False otherwise.
9688                 """
9689                 while self._can_add_job():
9690                         n = self._max_jobs - self._running_job_count()
9691                         if n < 1:
9692                                 break
9693
9694                         if not self._start_next_job(n):
9695                                 return False
9696
9697                 for q in self._queues:
9698                         if q:
9699                                 return True
9700                 return False
9701
9702         def _running_job_count(self):
9703                 job_count = 0
9704                 for q in self._queues:
9705                         job_count += len(q.running_tasks)
9706                 self._jobs = job_count
9707                 return job_count
9708
9709         def _start_next_job(self, n=1):
9710                 started_count = 0
9711                 for q in self._queues:
9712                         initial_job_count = len(q.running_tasks)
9713                         q.schedule()
9714                         final_job_count = len(q.running_tasks)
9715                         if final_job_count > initial_job_count:
9716                                 started_count += (final_job_count - initial_job_count)
9717                         if started_count >= n:
9718                                 break
9719                 return started_count
9720
9721 class TaskScheduler(object):
9722
9723         """
9724         A simple way to handle scheduling of AsynchronousTask instances. Simply
9725         add tasks and call run(). The run() method returns when no tasks remain.
9726         """
9727
9728         def __init__(self, max_jobs=None, max_load=None):
9729                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9730                 self._scheduler = QueueScheduler(
9731                         max_jobs=max_jobs, max_load=max_load)
9732                 self.sched_iface = self._scheduler.sched_iface
9733                 self.run = self._scheduler.run
9734                 self._scheduler.add(self._queue)
9735
9736         def add(self, task):
9737                 self._queue.add(task)
9738
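# Illustrative sketch (not part of the original module): TaskScheduler is
# the convenience wrapper used for small batches of AsynchronousTask-style
# jobs; "tasks" is assumed to provide the start()/addExitListener()
# interface described for SequentialTaskQueue above.
def _example_task_scheduler(tasks, jobs=2):
        task_scheduler = TaskScheduler(max_jobs=jobs)
        for task in tasks:
                task_scheduler.add(task)
        task_scheduler.run()  # returns once no tasks remain
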
9739 class JobStatusDisplay(object):
9740
9741         _bound_properties = ("curval", "failed", "running")
9742         _jobs_column_width = 48
9743
9744         # Don't update the display unless at least this much
9745         # time has passed, in units of seconds.
9746         _min_display_latency = 2
9747
9748         _default_term_codes = {
9749                 'cr'  : '\r',
9750                 'el'  : '\x1b[K',
9751                 'nel' : '\n',
9752         }
9753
9754         _termcap_name_map = {
9755                 'carriage_return' : 'cr',
9756                 'clr_eol'         : 'el',
9757                 'newline'         : 'nel',
9758         }
9759
9760         def __init__(self, out=sys.stdout, quiet=False):
9761                 object.__setattr__(self, "out", out)
9762                 object.__setattr__(self, "quiet", quiet)
9763                 object.__setattr__(self, "maxval", 0)
9764                 object.__setattr__(self, "merges", 0)
9765                 object.__setattr__(self, "_changed", False)
9766                 object.__setattr__(self, "_displayed", False)
9767                 object.__setattr__(self, "_last_display_time", 0)
9768                 object.__setattr__(self, "width", 80)
9769                 self.reset()
9770
9771                 isatty = hasattr(out, "isatty") and out.isatty()
9772                 object.__setattr__(self, "_isatty", isatty)
9773                 if not isatty or not self._init_term():
9774                         term_codes = {}
9775                         for k, capname in self._termcap_name_map.iteritems():
9776                                 term_codes[k] = self._default_term_codes[capname]
9777                         object.__setattr__(self, "_term_codes", term_codes)
9778                 encoding = sys.getdefaultencoding()
9779                 for k, v in self._term_codes.items():
9780                         if not isinstance(v, basestring):
9781                                 self._term_codes[k] = v.decode(encoding, 'replace')
9782
9783         def _init_term(self):
9784                 """
9785                 Initialize term control codes.
9786                 @rtype: bool
9787                 @returns: True if term codes were successfully initialized,
9788                         False otherwise.
9789                 """
9790
9791                 term_type = os.environ.get("TERM", "vt100")
9792                 tigetstr = None
9793
9794                 try:
9795                         import curses
9796                         try:
9797                                 curses.setupterm(term_type, self.out.fileno())
9798                                 tigetstr = curses.tigetstr
9799                         except curses.error:
9800                                 pass
9801                 except ImportError:
9802                         pass
9803
9804                 if tigetstr is None:
9805                         return False
9806
9807                 term_codes = {}
9808                 for k, capname in self._termcap_name_map.iteritems():
9809                         code = tigetstr(capname)
9810                         if code is None:
9811                                 code = self._default_term_codes[capname]
9812                         term_codes[k] = code
9813                 object.__setattr__(self, "_term_codes", term_codes)
9814                 return True
9815
9816         def _format_msg(self, msg):
9817                 return ">>> %s" % msg
9818
9819         def _erase(self):
9820                 self.out.write(
9821                         self._term_codes['carriage_return'] + \
9822                         self._term_codes['clr_eol'])
9823                 self.out.flush()
9824                 self._displayed = False
9825
9826         def _display(self, line):
9827                 self.out.write(line)
9828                 self.out.flush()
9829                 self._displayed = True
9830
9831         def _update(self, msg):
9832
9833                 out = self.out
9834                 if not self._isatty:
9835                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9836                         self.out.flush()
9837                         self._displayed = True
9838                         return
9839
9840                 if self._displayed:
9841                         self._erase()
9842
9843                 self._display(self._format_msg(msg))
9844
9845         def displayMessage(self, msg):
9846
9847                 was_displayed = self._displayed
9848
9849                 if self._isatty and self._displayed:
9850                         self._erase()
9851
9852                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9853                 self.out.flush()
9854                 self._displayed = False
9855
9856                 if was_displayed:
9857                         self._changed = True
9858                         self.display()
9859
9860         def reset(self):
9861                 self.maxval = 0
9862                 self.merges = 0
9863                 for name in self._bound_properties:
9864                         object.__setattr__(self, name, 0)
9865
9866                 if self._displayed:
9867                         self.out.write(self._term_codes['newline'])
9868                         self.out.flush()
9869                         self._displayed = False
9870
9871         def __setattr__(self, name, value):
9872                 old_value = getattr(self, name)
9873                 if value == old_value:
9874                         return
9875                 object.__setattr__(self, name, value)
9876                 if name in self._bound_properties:
9877                         self._property_change(name, old_value, value)
9878
9879         def _property_change(self, name, old_value, new_value):
9880                 self._changed = True
9881                 self.display()
9882
9883         def _load_avg_str(self):
9884                 try:
9885                         avg = getloadavg()
9886                 except OSError:
9887                         return 'unknown'
9888
9889                 max_avg = max(avg)
9890
9891                 if max_avg < 10:
9892                         digits = 2
9893                 elif max_avg < 100:
9894                         digits = 1
9895                 else:
9896                         digits = 0
9897
9898                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9899
9900         def display(self):
9901                 """
9902                 Display status on stdout, but only if something has
9903                 changed since the last call.
9904                 """
9905
9906                 if self.quiet:
9907                         return
9908
9909                 current_time = time.time()
9910                 time_delta = current_time - self._last_display_time
9911                 if self._displayed and \
9912                         not self._changed:
9913                         if not self._isatty:
9914                                 return
9915                         if time_delta < self._min_display_latency:
9916                                 return
9917
9918                 self._last_display_time = current_time
9919                 self._changed = False
9920                 self._display_status()
9921
9922         def _display_status(self):
9923                 # Don't use len(self._completed_tasks) here since that also
9924                 # can include uninstall tasks.
9925                 curval_str = str(self.curval)
9926                 maxval_str = str(self.maxval)
9927                 running_str = str(self.running)
9928                 failed_str = str(self.failed)
9929                 load_avg_str = self._load_avg_str()
9930
9931                 color_output = StringIO()
9932                 plain_output = StringIO()
9933                 style_file = portage.output.ConsoleStyleFile(color_output)
9934                 style_file.write_listener = plain_output
9935                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9936                 style_writer.style_listener = style_file.new_styles
9937                 f = formatter.AbstractFormatter(style_writer)
9938
9939                 number_style = "INFORM"
9940                 f.add_literal_data("Jobs: ")
9941                 f.push_style(number_style)
9942                 f.add_literal_data(curval_str)
9943                 f.pop_style()
9944                 f.add_literal_data(" of ")
9945                 f.push_style(number_style)
9946                 f.add_literal_data(maxval_str)
9947                 f.pop_style()
9948                 f.add_literal_data(" complete")
9949
9950                 if self.running:
9951                         f.add_literal_data(", ")
9952                         f.push_style(number_style)
9953                         f.add_literal_data(running_str)
9954                         f.pop_style()
9955                         f.add_literal_data(" running")
9956
9957                 if self.failed:
9958                         f.add_literal_data(", ")
9959                         f.push_style(number_style)
9960                         f.add_literal_data(failed_str)
9961                         f.pop_style()
9962                         f.add_literal_data(" failed")
9963
9964                 padding = self._jobs_column_width - len(plain_output.getvalue())
9965                 if padding > 0:
9966                         f.add_literal_data(padding * " ")
9967
9968                 f.add_literal_data("Load avg: ")
9969                 f.add_literal_data(load_avg_str)
9970
9971                 # Truncate to fit width, to avoid making the terminal scroll if the
9972                 # line overflows (happens when the load average is large).
9973                 plain_output = plain_output.getvalue()
9974                 if self._isatty and len(plain_output) > self.width:
9975                         # Use plain_output here since it's easier to truncate
9976                         # properly than the color output which contains console
9977                         # color codes.
9978                         self._update(plain_output[:self.width])
9979                 else:
9980                         self._update(color_output.getvalue())
9981
9982                 xtermTitle(" ".join(plain_output.split()))
9983
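# Illustrative sketch (not part of the original module): the Scheduler below
# drives JobStatusDisplay by assigning the bound properties (curval, failed,
# running); each change redraws the status line, and displayMessage() prints
# a one-off line above it. The message text here is hypothetical.
def _example_job_status_display():
        display = JobStatusDisplay(out=sys.stdout, quiet=False)
        display.maxval = 3
        display.curval = 1
        display.running = 2
        display.displayMessage("one job finished")
        display.reset()
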
9984 class Scheduler(PollScheduler):
9985
9986         _opts_ignore_blockers = \
9987                 frozenset(["--buildpkgonly",
9988                 "--fetchonly", "--fetch-all-uri",
9989                 "--nodeps", "--pretend"])
9990
9991         _opts_no_background = \
9992                 frozenset(["--pretend",
9993                 "--fetchonly", "--fetch-all-uri"])
9994
9995         _opts_no_restart = frozenset(["--buildpkgonly",
9996                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9997
9998         _bad_resume_opts = set(["--ask", "--changelog",
9999                 "--resume", "--skipfirst"])
10000
10001         _fetch_log = "/var/log/emerge-fetch.log"
10002
10003         class _iface_class(SlotObject):
10004                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10005                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10006                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10007                         "unregister")
10008
10009         class _fetch_iface_class(SlotObject):
10010                 __slots__ = ("log_file", "schedule")
10011
10012         _task_queues_class = slot_dict_class(
10013                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10014
10015         class _build_opts_class(SlotObject):
10016                 __slots__ = ("buildpkg", "buildpkgonly",
10017                         "fetch_all_uri", "fetchonly", "pretend")
10018
10019         class _binpkg_opts_class(SlotObject):
10020                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10021
10022         class _pkg_count_class(SlotObject):
10023                 __slots__ = ("curval", "maxval")
10024
10025         class _emerge_log_class(SlotObject):
10026                 __slots__ = ("xterm_titles",)
10027
10028                 def log(self, *pargs, **kwargs):
10029                         if not self.xterm_titles:
10030                                 # Avoid interference with the scheduler's status display.
10031                                 kwargs.pop("short_msg", None)
10032                         emergelog(self.xterm_titles, *pargs, **kwargs)
10033
10034         class _failed_pkg(SlotObject):
10035                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10036
10037         class _ConfigPool(object):
10038                 """Interface for a task to temporarily allocate a config
10039                 instance from a pool. This allows a task to be constructed
10040                 long before the config instance actually becomes needed, like
10041                 when prefetchers are constructed for the whole merge list."""
10042                 __slots__ = ("_root", "_allocate", "_deallocate")
10043                 def __init__(self, root, allocate, deallocate):
10044                         self._root = root
10045                         self._allocate = allocate
10046                         self._deallocate = deallocate
10047                 def allocate(self):
10048                         return self._allocate(self._root)
10049                 def deallocate(self, settings):
10050                         self._deallocate(settings)
10051
10052         class _unknown_internal_error(portage.exception.PortageException):
10053                 """
10054                 Used internally to terminate scheduling. The specific reason for
10055                 the failure should have been dumped to stderr.
10056                 """
10057                 def __init__(self, value=""):
10058                         portage.exception.PortageException.__init__(self, value)
10059
10060         def __init__(self, settings, trees, mtimedb, myopts,
10061                 spinner, mergelist, favorites, digraph):
10062                 PollScheduler.__init__(self)
10063                 self.settings = settings
10064                 self.target_root = settings["ROOT"]
10065                 self.trees = trees
10066                 self.myopts = myopts
10067                 self._spinner = spinner
10068                 self._mtimedb = mtimedb
10069                 self._mergelist = mergelist
10070                 self._favorites = favorites
10071                 self._args_set = InternalPackageSet(favorites)
10072                 self._build_opts = self._build_opts_class()
10073                 for k in self._build_opts.__slots__:
10074                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10075                 self._binpkg_opts = self._binpkg_opts_class()
10076                 for k in self._binpkg_opts.__slots__:
10077                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10078
10079                 self.curval = 0
10080                 self._logger = self._emerge_log_class()
10081                 self._task_queues = self._task_queues_class()
10082                 for k in self._task_queues.allowed_keys:
10083                         setattr(self._task_queues, k,
10084                                 SequentialTaskQueue())
10085
10086                 # Holds merges that will wait to be executed when no builds are
10087                 # executing. This is useful for system packages since dependencies
10088                 # on system packages are frequently unspecified.
10089                 self._merge_wait_queue = []
10090                 # Holds merges that have been transferred from the merge_wait_queue to
10091                 # the actual merge queue. They are removed from this list upon
10092                 # completion. Other packages can start building only when this list is
10093                 # empty.
10094                 self._merge_wait_scheduled = []
10095
10096                 # Holds system packages and their deep runtime dependencies. Before
10097                 # being merged, these packages go to merge_wait_queue, to be merged
10098                 # when no other packages are building.
10099                 self._deep_system_deps = set()
10100
10101                 # Holds packages to merge which will satisfy currently unsatisfied
10102                 # deep runtime dependencies of system packages. If this is not empty
10103                 # then no parallel builds will be spawned until it is empty. This
10104                 # minimizes the possibility that a build will fail due to the system
10105                 # being in a fragile state. For example, see bug #259954.
10106                 self._unsatisfied_system_deps = set()
10107
10108                 self._status_display = JobStatusDisplay()
10109                 self._max_load = myopts.get("--load-average")
10110                 max_jobs = myopts.get("--jobs")
10111                 if max_jobs is None:
10112                         max_jobs = 1
10113                 self._set_max_jobs(max_jobs)
10114
10115                 # The root where the currently running
10116                 # portage instance is installed.
10117                 self._running_root = trees["/"]["root_config"]
10118                 self.edebug = 0
10119                 if settings.get("PORTAGE_DEBUG", "") == "1":
10120                         self.edebug = 1
10121                 self.pkgsettings = {}
10122                 self._config_pool = {}
10123                 self._blocker_db = {}
10124                 for root in trees:
10125                         self._config_pool[root] = []
10126                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10127
10128                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10129                         schedule=self._schedule_fetch)
10130                 self._sched_iface = self._iface_class(
10131                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10132                         dblinkDisplayMerge=self._dblink_display_merge,
10133                         dblinkElog=self._dblink_elog,
10134                         dblinkEmergeLog=self._dblink_emerge_log,
10135                         fetch=fetch_iface, register=self._register,
10136                         schedule=self._schedule_wait,
10137                         scheduleSetup=self._schedule_setup,
10138                         scheduleUnpack=self._schedule_unpack,
10139                         scheduleYield=self._schedule_yield,
10140                         unregister=self._unregister)
10141
10142                 self._prefetchers = weakref.WeakValueDictionary()
10143                 self._pkg_queue = []
10144                 self._completed_tasks = set()
10145
10146                 self._failed_pkgs = []
10147                 self._failed_pkgs_all = []
10148                 self._failed_pkgs_die_msgs = []
10149                 self._post_mod_echo_msgs = []
10150                 self._parallel_fetch = False
10151                 merge_count = len([x for x in mergelist \
10152                         if isinstance(x, Package) and x.operation == "merge"])
10153                 self._pkg_count = self._pkg_count_class(
10154                         curval=0, maxval=merge_count)
10155                 self._status_display.maxval = self._pkg_count.maxval
10156
10157                 # The load average takes some time to respond when new
10158                 # jobs are added, so we need to limit the rate of adding
10159                 # new jobs.
10160                 self._job_delay_max = 10
10161                 self._job_delay_factor = 1.0
10162                 self._job_delay_exp = 1.5
10163                 self._previous_job_start_time = None
10164
10165                 self._set_digraph(digraph)
10166
10167                 # This is used to memoize the _choose_pkg() result when
10168                 # no packages can be chosen until one of the existing
10169                 # jobs completes.
10170                 self._choose_pkg_return_early = False
10171
10172                 features = self.settings.features
10173                 if "parallel-fetch" in features and \
10174                         not ("--pretend" in self.myopts or \
10175                         "--fetch-all-uri" in self.myopts or \
10176                         "--fetchonly" in self.myopts):
10177                         if "distlocks" not in features:
10178                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10179                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10180                                         "requires the distlocks feature enabled"+"\n",
10181                                         noiselevel=-1)
10182                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10183                                         "thus parallel-fetching is being disabled"+"\n",
10184                                         noiselevel=-1)
10185                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10186                         elif len(mergelist) > 1:
10187                                 self._parallel_fetch = True
10188
10189                 if self._parallel_fetch:
10190                         # clear out existing fetch log if it exists
10191                         try:
10192                                 open(self._fetch_log, 'w').close()
10193                         except EnvironmentError:
10194                                 pass
10195
10196                 self._running_portage = None
10197                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10198                         portage.const.PORTAGE_PACKAGE_ATOM)
10199                 if portage_match:
10200                         cpv = portage_match.pop()
10201                         self._running_portage = self._pkg(cpv, "installed",
10202                                 self._running_root, installed=True)
10203
10204         def _poll(self, timeout=None):
10205                 self._schedule()
10206                 PollScheduler._poll(self, timeout=timeout)
10207
10208         def _set_max_jobs(self, max_jobs):
10209                 self._max_jobs = max_jobs
10210                 self._task_queues.jobs.max_jobs = max_jobs
10211
10212         def _background_mode(self):
10213                 """
10214                 Check if background mode is enabled and adjust states as necessary.
10215
10216                 @rtype: bool
10217                 @returns: True if background mode is enabled, False otherwise.
10218                 """
10219                 background = (self._max_jobs is True or \
10220                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10221                         not bool(self._opts_no_background.intersection(self.myopts))
10222
10223                 if background:
10224                         interactive_tasks = self._get_interactive_tasks()
10225                         if interactive_tasks:
10226                                 background = False
10227                                 writemsg_level(">>> Sending package output to stdio due " + \
10228                                         "to interactive package(s):\n",
10229                                         level=logging.INFO, noiselevel=-1)
10230                                 msg = [""]
10231                                 for pkg in interactive_tasks:
10232                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10233                                         if pkg.root != "/":
10234                                                 pkg_str += " for " + pkg.root
10235                                         msg.append(pkg_str)
10236                                 msg.append("")
10237                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10238                                         level=logging.INFO, noiselevel=-1)
10239                                 if self._max_jobs is True or self._max_jobs > 1:
10240                                         self._set_max_jobs(1)
10241                                         writemsg_level(">>> Setting --jobs=1 due " + \
10242                                                 "to the above interactive package(s)\n",
10243                                                 level=logging.INFO, noiselevel=-1)
10244
10245                 self._status_display.quiet = \
10246                         not background or \
10247                         ("--quiet" in self.myopts and \
10248                         "--verbose" not in self.myopts)
10249
10250                 self._logger.xterm_titles = \
10251                         "notitles" not in self.settings.features and \
10252                         self._status_display.quiet
10253
10254                 return background
10255
10256         def _get_interactive_tasks(self):
10257                 from portage import flatten
10258                 from portage.dep import use_reduce, paren_reduce
10259                 interactive_tasks = []
10260                 for task in self._mergelist:
10261                         if not (isinstance(task, Package) and \
10262                                 task.operation == "merge"):
10263                                 continue
10264                         try:
10265                                 properties = flatten(use_reduce(paren_reduce(
10266                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10267                         except portage.exception.InvalidDependString, e:
10268                                 show_invalid_depstring_notice(task,
10269                                         task.metadata["PROPERTIES"], str(e))
10270                                 raise self._unknown_internal_error()
10271                         if "interactive" in properties:
10272                                 interactive_tasks.append(task)
10273                 return interactive_tasks
10274
10275         def _set_digraph(self, digraph):
10276                 if "--nodeps" in self.myopts or \
10277                         (self._max_jobs is not True and self._max_jobs < 2):
10278                         # save some memory
10279                         self._digraph = None
10280                         return
10281
10282                 self._digraph = digraph
10283                 self._find_system_deps()
10284                 self._prune_digraph()
10285                 self._prevent_builddir_collisions()
10286
10287         def _find_system_deps(self):
10288                 """
10289                 Find system packages and their deep runtime dependencies. Before being
10290                 merged, these packages go to merge_wait_queue, to be merged when no
10291                 other packages are building.
10292                 """
10293                 deep_system_deps = self._deep_system_deps
10294                 deep_system_deps.clear()
10295                 deep_system_deps.update(
10296                         _find_deep_system_runtime_deps(self._digraph))
10297                 deep_system_deps.difference_update([pkg for pkg in \
10298                         deep_system_deps if pkg.operation != "merge"])
10299
10300         def _prune_digraph(self):
10301                 """
10302                 Prune any root nodes that are irrelevant.
10303                 """
10304
10305                 graph = self._digraph
10306                 completed_tasks = self._completed_tasks
10307                 removed_nodes = set()
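                      # Removing a root node may expose new irrelevant root nodes,
                      # so keep pruning until a pass removes nothing.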
10308                 while True:
10309                         for node in graph.root_nodes():
10310                                 if not isinstance(node, Package) or \
10311                                         (node.installed and node.operation == "nomerge") or \
10312                                         node.onlydeps or \
10313                                         node in completed_tasks:
10314                                         removed_nodes.add(node)
10315                         if removed_nodes:
10316                                 graph.difference_update(removed_nodes)
10317                         if not removed_nodes:
10318                                 break
10319                         removed_nodes.clear()
10320
10321         def _prevent_builddir_collisions(self):
10322                 """
10323                 When building stages, sometimes the same exact cpv needs to be merged
10324                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10325                 in the builddir. Currently, normal file locks would be inappropriate
10326                 for this purpose since emerge holds all of its build dir locks from
10327                 the main process.
10328                 """
10329                 cpv_map = {}
10330                 for pkg in self._mergelist:
10331                         if not isinstance(pkg, Package):
10332                                 # a satisfied blocker
10333                                 continue
10334                         if pkg.installed:
10335                                 continue
10336                         if pkg.cpv not in cpv_map:
10337                                 cpv_map[pkg.cpv] = [pkg]
10338                                 continue
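                              # Another merge of the same cpv is already scheduled;
                              # add a buildtime dependency edge so the two merges
                              # never occupy the builddir at the same time.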
10339                         for earlier_pkg in cpv_map[pkg.cpv]:
10340                                 self._digraph.add(earlier_pkg, pkg,
10341                                         priority=DepPriority(buildtime=True))
10342                         cpv_map[pkg.cpv].append(pkg)
10343
10344         class _pkg_failure(portage.exception.PortageException):
10345                 """
10346                 An instance of this class is raised by unmerge() when
10347                 an uninstallation fails.
10348                 """
10349                 status = 1
10350                 def __init__(self, *pargs):
10351                         portage.exception.PortageException.__init__(self, pargs)
10352                         if pargs:
10353                                 self.status = pargs[0]
10354
10355         def _schedule_fetch(self, fetcher):
10356                 """
10357                 Schedule a fetcher on the fetch queue, in order to
10358                 serialize access to the fetch log.
10359                 """
10360                 self._task_queues.fetch.addFront(fetcher)
10361
10362         def _schedule_setup(self, setup_phase):
10363                 """
10364                 Schedule a setup phase on the merge queue, in order to
10365                 serialize unsandboxed access to the live filesystem.
10366                 """
10367                 self._task_queues.merge.addFront(setup_phase)
10368                 self._schedule()
10369
10370         def _schedule_unpack(self, unpack_phase):
10371                 """
10372                 Schedule an unpack phase on the unpack queue, in order
10373                 to serialize $DISTDIR access for live ebuilds.
10374                 """
10375                 self._task_queues.unpack.add(unpack_phase)
10376
10377         def _find_blockers(self, new_pkg):
10378                 """
10379                 Returns a callable which should be called only when
10380                 the vdb lock has been acquired.
10381                 """
10382                 def get_blockers():
10383                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10384                 return get_blockers
10385
10386         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10387                 if self._opts_ignore_blockers.intersection(self.myopts):
10388                         return None
10389
10390                 # Call gc.collect() here to avoid heap overflow that
10391                 # triggers 'Cannot allocate memory' errors (reported
10392                 # with python-2.5).
10393                 import gc
10394                 gc.collect()
10395
10396                 blocker_db = self._blocker_db[new_pkg.root]
10397
10398                 blocker_dblinks = []
10399                 for blocking_pkg in blocker_db.findInstalledBlockers(
10400                         new_pkg, acquire_lock=acquire_lock):
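                              # Packages in the same slot or with the same cpv as
                              # new_pkg are skipped rather than treated as blockers.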
10401                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10402                                 continue
10403                         if new_pkg.cpv == blocking_pkg.cpv:
10404                                 continue
10405                         blocker_dblinks.append(portage.dblink(
10406                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10407                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10408                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10409
10410                 gc.collect()
10411
10412                 return blocker_dblinks
10413
10414         def _dblink_pkg(self, pkg_dblink):
10415                 cpv = pkg_dblink.mycpv
10416                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10417                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10418                 installed = type_name == "installed"
10419                 return self._pkg(cpv, type_name, root_config, installed=installed)
10420
10421         def _append_to_log_path(self, log_path, msg):
10422                 f = open(log_path, 'a')
10423                 try:
10424                         f.write(msg)
10425                 finally:
10426                         f.close()
10427
10428         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10429
10430                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10431                 log_file = None
10432                 out = sys.stdout
10433                 background = self._background
10434
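                      # In background mode, route elog output to the build log
                      # instead of stdout.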
10435                 if background and log_path is not None:
10436                         log_file = open(log_path, 'a')
10437                         out = log_file
10438
10439                 try:
10440                         for msg in msgs:
10441                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10442                 finally:
10443                         if log_file is not None:
10444                                 log_file.close()
10445
10446         def _dblink_emerge_log(self, msg):
10447                 self._logger.log(msg)
10448
10449         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10450                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10451                 background = self._background
10452
10453                 if log_path is None:
10454                         if not (background and level < logging.WARN):
10455                                 portage.util.writemsg_level(msg,
10456                                         level=level, noiselevel=noiselevel)
10457                 else:
10458                         if not background:
10459                                 portage.util.writemsg_level(msg,
10460                                         level=level, noiselevel=noiselevel)
10461                         self._append_to_log_path(log_path, msg)
10462
10463         def _dblink_ebuild_phase(self,
10464                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10465                 """
10466                 Using this callback for merge phases allows the scheduler
10467                 to run while these phases execute asynchronously, and allows
10468                 the scheduler to control output handling.
10469                 """
10470
10471                 scheduler = self._sched_iface
10472                 settings = pkg_dblink.settings
10473                 pkg = self._dblink_pkg(pkg_dblink)
10474                 background = self._background
10475                 log_path = settings.get("PORTAGE_LOG_FILE")
10476
10477                 ebuild_phase = EbuildPhase(background=background,
10478                         pkg=pkg, phase=phase, scheduler=scheduler,
10479                         settings=settings, tree=pkg_dblink.treetype)
10480                 ebuild_phase.start()
10481                 ebuild_phase.wait()
10482
10483                 return ebuild_phase.returncode
10484
10485         def _generate_digests(self):
10486                 """
10487                 Generate digests if necessary for --digest or FEATURES=digest.
10488                 In order to avoid interference, this must be done before parallel
10489                 tasks are started.
10490                 """
10491
10492                 if '--fetchonly' in self.myopts:
10493                         return os.EX_OK
10494
10495                 digest = '--digest' in self.myopts
10496                 if not digest:
10497                         for pkgsettings in self.pkgsettings.itervalues():
10498                                 if 'digest' in pkgsettings.features:
10499                                         digest = True
10500                                         break
10501
10502                 if not digest:
10503                         return os.EX_OK
10504
10505                 for x in self._mergelist:
10506                         if not isinstance(x, Package) or \
10507                                 x.type_name != 'ebuild' or \
10508                                 x.operation != 'merge':
10509                                 continue
10510                         pkgsettings = self.pkgsettings[x.root]
10511                         if '--digest' not in self.myopts and \
10512                                 'digest' not in pkgsettings.features:
10513                                 continue
10514                         portdb = x.root_config.trees['porttree'].dbapi
10515                         ebuild_path = portdb.findname(x.cpv)
10516                         if not ebuild_path:
10517                                 writemsg_level(
10518                                         "!!! Could not locate ebuild for '%s'.\n" \
10519                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10520                                 return 1
10521                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10522                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10523                                 writemsg_level(
10524                                         "!!! Unable to generate manifest for '%s'.\n" \
10525                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10526                                 return 1
10527
10528                 return os.EX_OK
10529
10530         def _check_manifests(self):
10531                 # Verify all the manifests now so that the user is notified of failure
10532                 # as soon as possible.
10533                 if "strict" not in self.settings.features or \
10534                         "--fetchonly" in self.myopts or \
10535                         "--fetch-all-uri" in self.myopts:
10536                         return os.EX_OK
10537
10538                 shown_verifying_msg = False
10539                 quiet_settings = {}
10540                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10541                         quiet_config = portage.config(clone=pkgsettings)
10542                         quiet_config["PORTAGE_QUIET"] = "1"
10543                         quiet_config.backup_changes("PORTAGE_QUIET")
10544                         quiet_settings[myroot] = quiet_config
10545                         del quiet_config
10546
10547                 for x in self._mergelist:
10548                         if not isinstance(x, Package) or \
10549                                 x.type_name != "ebuild":
10550                                 continue
10551
10552                         if not shown_verifying_msg:
10553                                 shown_verifying_msg = True
10554                                 self._status_msg("Verifying ebuild manifests")
10555
10556                         root_config = x.root_config
10557                         portdb = root_config.trees["porttree"].dbapi
10558                         quiet_config = quiet_settings[root_config.root]
10559                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10560                         if not portage.digestcheck([], quiet_config, strict=True):
10561                                 return 1
10562
10563                 return os.EX_OK
10564
10565         def _add_prefetchers(self):
10566
10567                 if not self._parallel_fetch:
10568                         return
10569
10571                 self._status_msg("Starting parallel fetch")
10572
10573                 prefetchers = self._prefetchers
10574                 getbinpkg = "--getbinpkg" in self.myopts
10575
10576                 # In order to avoid "waiting for lock" messages
10577                 # at the beginning, which annoy users, never
10578                 # spawn a prefetcher for the first package.
10579                 for pkg in self._mergelist[1:]:
10580                         prefetcher = self._create_prefetcher(pkg)
10581                         if prefetcher is not None:
10582                                 self._task_queues.fetch.add(prefetcher)
10583                                 prefetchers[pkg] = prefetcher
10584
10585         def _create_prefetcher(self, pkg):
10586                 """
10587                 @return: a prefetcher, or None if not applicable
10588                 """
10589                 prefetcher = None
10590
10591                 if not isinstance(pkg, Package):
10592                         pass
10593
10594                 elif pkg.type_name == "ebuild":
10595
10596                         prefetcher = EbuildFetcher(background=True,
10597                                 config_pool=self._ConfigPool(pkg.root,
10598                                 self._allocate_config, self._deallocate_config),
10599                                 fetchonly=1, logfile=self._fetch_log,
10600                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10601
10602                 elif pkg.type_name == "binary" and \
10603                         "--getbinpkg" in self.myopts and \
10604                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10605
10606                         prefetcher = BinpkgPrefetcher(background=True,
10607                                 pkg=pkg, scheduler=self._sched_iface)
10608
10609                 return prefetcher
10610
10611         def _is_restart_scheduled(self):
10612                 """
10613                 Check if the merge list contains a replacement
10614                 for the currently running instance, which will result
10615                 in a restart after the merge.
10616                 @rtype: bool
10617                 @returns: True if a restart is scheduled, False otherwise.
10618                 """
10619                 if self._opts_no_restart.intersection(self.myopts):
10620                         return False
10621
10622                 mergelist = self._mergelist
10623
10624                 for i, pkg in enumerate(mergelist):
10625                         if self._is_restart_necessary(pkg) and \
10626                                 i != len(mergelist) - 1:
10627                                 return True
10628
10629                 return False
10630
10631         def _is_restart_necessary(self, pkg):
10632                 """
10633                 @return: True if merging the given package
10634                         requires a restart, False otherwise.
10635                 """
10636
10637                 # Figure out if we need a restart.
10638                 if pkg.root == self._running_root.root and \
10639                         portage.match_from_list(
10640                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10641                         if self._running_portage:
10642                                 return pkg.cpv != self._running_portage.cpv
10643                         return True
10644                 return False
10645
10646         def _restart_if_necessary(self, pkg):
10647                 """
10648                 Use execv() to restart emerge. This happens
10649                 if portage upgrades itself and there are
10650                 remaining packages in the list.
10651                 """
10652
10653                 if self._opts_no_restart.intersection(self.myopts):
10654                         return
10655
10656                 if not self._is_restart_necessary(pkg):
10657                         return
10658
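                      # If this is the last package in the list, nothing remains
                      # to merge afterwards, so a restart is unnecessary.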
10659                 if pkg == self._mergelist[-1]:
10660                         return
10661
10662                 self._main_loop_cleanup()
10663
10664                 logger = self._logger
10665                 pkg_count = self._pkg_count
10666                 mtimedb = self._mtimedb
10667                 bad_resume_opts = self._bad_resume_opts
10668
10669                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10670                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10671
10672                 logger.log(" *** RESTARTING " + \
10673                         "emerge via exec() after change of " + \
10674                         "portage version.")
10675
10676                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10677                 mtimedb.commit()
10678                 portage.run_exitfuncs()
10679                 mynewargv = [sys.argv[0], "--resume"]
10680                 resume_opts = self.myopts.copy()
10681                 # For automatic resume, we need to prevent
10682                 # any of bad_resume_opts from leaking in
10683                 # via EMERGE_DEFAULT_OPTS.
10684                 resume_opts["--ignore-default-opts"] = True
10685                 for myopt, myarg in resume_opts.iteritems():
10686                         if myopt not in bad_resume_opts:
10687                                 if myarg is True:
10688                                         mynewargv.append(myopt)
10689                                 else:
10690                                         mynewargv.append(myopt +"="+ str(myarg))
10691                 # priority only needs to be adjusted on the first run
10692                 os.environ["PORTAGE_NICENESS"] = "0"
10693                 os.execv(mynewargv[0], mynewargv)
10694
10695         def merge(self):
10696
10697                 if "--resume" in self.myopts:
10698                         # We're resuming.
10699                         portage.writemsg_stdout(
10700                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10701                         self._logger.log(" *** Resuming merge...")
10702
10703                 self._save_resume_list()
10704
10705                 try:
10706                         self._background = self._background_mode()
10707                 except self._unknown_internal_error:
10708                         return 1
10709
10710                 for root in self.trees:
10711                         root_config = self.trees[root]["root_config"]
10712
10713                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10714                         # since it might spawn pkg_nofetch, which requires PORTAGE_BUILDDIR
10715                         # for ensuring a sane $PWD (bug #239560) and for storing elog messages.
10716                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10717                         if not tmpdir or not os.path.isdir(tmpdir):
10718                                 msg = "The directory specified in your " + \
10719                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10720                                         "does not exist. Please create this " + \
10721                                         "directory or correct your PORTAGE_TMPDIR setting."
10722                                 msg = textwrap.wrap(msg, 70)
10723                                 out = portage.output.EOutput()
10724                                 for l in msg:
10725                                         out.eerror(l)
10726                                 return 1
10727
10728                         if self._background:
10729                                 root_config.settings.unlock()
10730                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10731                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10732                                 root_config.settings.lock()
10733
10734                         self.pkgsettings[root] = portage.config(
10735                                 clone=root_config.settings)
10736
10737                 rval = self._generate_digests()
10738                 if rval != os.EX_OK:
10739                         return rval
10740
10741                 rval = self._check_manifests()
10742                 if rval != os.EX_OK:
10743                         return rval
10744
10745                 keep_going = "--keep-going" in self.myopts
10746                 fetchonly = self._build_opts.fetchonly
10747                 mtimedb = self._mtimedb
10748                 failed_pkgs = self._failed_pkgs
10749
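                      # With --keep-going, drop failed packages from the resume
                      # list, recalculate it, and retry with whatever remains.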
10750                 while True:
10751                         rval = self._merge()
10752                         if rval == os.EX_OK or fetchonly or not keep_going:
10753                                 break
10754                         if "resume" not in mtimedb:
10755                                 break
10756                         mergelist = self._mtimedb["resume"].get("mergelist")
10757                         if not mergelist:
10758                                 break
10759
10760                         if not failed_pkgs:
10761                                 break
10762
10763                         for failed_pkg in failed_pkgs:
10764                                 mergelist.remove(list(failed_pkg.pkg))
10765
10766                         self._failed_pkgs_all.extend(failed_pkgs)
10767                         del failed_pkgs[:]
10768
10769                         if not mergelist:
10770                                 break
10771
10772                         if not self._calc_resume_list():
10773                                 break
10774
10775                         clear_caches(self.trees)
10776                         if not self._mergelist:
10777                                 break
10778
10779                         self._save_resume_list()
10780                         self._pkg_count.curval = 0
10781                         self._pkg_count.maxval = len([x for x in self._mergelist \
10782                                 if isinstance(x, Package) and x.operation == "merge"])
10783                         self._status_display.maxval = self._pkg_count.maxval
10784
10785                 self._logger.log(" *** Finished. Cleaning up...")
10786
10787                 if failed_pkgs:
10788                         self._failed_pkgs_all.extend(failed_pkgs)
10789                         del failed_pkgs[:]
10790
10791                 background = self._background
10792                 failure_log_shown = False
10793                 if background and len(self._failed_pkgs_all) == 1:
10794                         # If only one package failed then just show its
10795                         # whole log for easy viewing.
10796                         failed_pkg = self._failed_pkgs_all[-1]
10798                         log_file = None
10799
10802                         log_path = self._locate_failure_log(failed_pkg)
10803                         if log_path is not None:
10804                                 try:
10805                                         log_file = open(log_path)
10806                                 except IOError:
10807                                         pass
10808
10809                         if log_file is not None:
10810                                 try:
10811                                         for line in log_file:
10812                                                 writemsg_level(line, noiselevel=-1)
10813                                 finally:
10814                                         log_file.close()
10815                                 failure_log_shown = True
10816
10817                 # Dump mod_echo output now since it tends to flood the terminal.
10818                 # This prevents more important output, generated later, from being
10819                 # swept away by the mod_echo output.
10820                 mod_echo_output = _flush_elog_mod_echo()
10821
10822                 if background and not failure_log_shown and \
10823                         self._failed_pkgs_all and \
10824                         self._failed_pkgs_die_msgs and \
10825                         not mod_echo_output:
10826
10827                         printer = portage.output.EOutput()
10828                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10829                                 root_msg = ""
10830                                 if mysettings["ROOT"] != "/":
10831                                         root_msg = " merged to %s" % mysettings["ROOT"]
10832                                 print
10833                                 printer.einfo("Error messages for package %s%s:" % \
10834                                         (colorize("INFORM", key), root_msg))
10835                                 print
10836                                 for phase in portage.const.EBUILD_PHASES:
10837                                         if phase not in logentries:
10838                                                 continue
10839                                         for msgtype, msgcontent in logentries[phase]:
10840                                                 if isinstance(msgcontent, basestring):
10841                                                         msgcontent = [msgcontent]
10842                                                 for line in msgcontent:
10843                                                         printer.eerror(line.strip("\n"))
10844
10845                 if self._post_mod_echo_msgs:
10846                         for msg in self._post_mod_echo_msgs:
10847                                 msg()
10848
10849                 if len(self._failed_pkgs_all) > 1 or \
10850                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10851                         if len(self._failed_pkgs_all) > 1:
10852                                 msg = "The following %d packages have " % \
10853                                         len(self._failed_pkgs_all) + \
10854                                         "failed to build or install:"
10855                         else:
10856                                 msg = "The following package has " + \
10857                                         "failed to build or install:"
10858                         prefix = bad(" * ")
10859                         writemsg(prefix + "\n", noiselevel=-1)
10860                         from textwrap import wrap
10861                         for line in wrap(msg, 72):
10862                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10863                         writemsg(prefix + "\n", noiselevel=-1)
10864                         for failed_pkg in self._failed_pkgs_all:
10865                                 writemsg("%s\t%s\n" % (prefix,
10866                                         colorize("INFORM", str(failed_pkg.pkg))),
10867                                         noiselevel=-1)
10868                         writemsg(prefix + "\n", noiselevel=-1)
10869
10870                 return rval
10871
10872         def _elog_listener(self, mysettings, key, logentries, fulltext):
10873                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10874                 if errors:
10875                         self._failed_pkgs_die_msgs.append(
10876                                 (mysettings, key, errors))
10877
10878         def _locate_failure_log(self, failed_pkg):
10879
10880                 build_dir = failed_pkg.build_dir
10881                 log_file = None
10882
10883                 log_paths = [failed_pkg.build_log]
10884
10885                 for log_path in log_paths:
10886                         if not log_path:
10887                                 continue
10888
10889                         try:
10890                                 log_size = os.stat(log_path).st_size
10891                         except OSError:
10892                                 continue
10893
10894                         if log_size == 0:
10895                                 continue
10896
10897                         return log_path
10898
10899                 return None
10900
10901         def _add_packages(self):
10902                 pkg_queue = self._pkg_queue
10903                 for pkg in self._mergelist:
10904                         if isinstance(pkg, Package):
10905                                 pkg_queue.append(pkg)
10906                         elif isinstance(pkg, Blocker):
10907                                 pass
10908
10909         def _system_merge_started(self, merge):
10910                 """
10911                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10912                 """
10913                 graph = self._digraph
10914                 if graph is None:
10915                         return
10916                 pkg = merge.merge.pkg
10917
10918                 # Skip this if $ROOT != / since it shouldn't matter if there
10919                 # are unsatisfied system runtime deps in this case.
10920                 if pkg.root != '/':
10921                         return
10922
10923                 completed_tasks = self._completed_tasks
10924                 unsatisfied = self._unsatisfied_system_deps
10925
10926                 def ignore_non_runtime_or_satisfied(priority):
10927                         """
10928                         Ignore non-runtime and satisfied runtime priorities.
10929                         """
10930                         if isinstance(priority, DepPriority) and \
10931                                 not priority.satisfied and \
10932                                 (priority.runtime or priority.runtime_post):
10933                                 return False
10934                         return True
10935
10936                 # When checking for unsatisfied runtime deps, only check
10937                 # direct deps since indirect deps are checked when the
10938                 # corresponding parent is merged.
10939                 for child in graph.child_nodes(pkg,
10940                         ignore_priority=ignore_non_runtime_or_satisfied):
10941                         if not isinstance(child, Package) or \
10942                                 child.operation == 'uninstall':
10943                                 continue
10944                         if child is pkg:
10945                                 continue
10946                         if child.operation == 'merge' and \
10947                                 child not in completed_tasks:
10948                                 unsatisfied.add(child)
10949
10950         def _merge_wait_exit_handler(self, task):
10951                 self._merge_wait_scheduled.remove(task)
10952                 self._merge_exit(task)
10953
10954         def _merge_exit(self, merge):
10955                 self._do_merge_exit(merge)
10956                 self._deallocate_config(merge.merge.settings)
10957                 if merge.returncode == os.EX_OK and \
10958                         not merge.merge.pkg.installed:
10959                         self._status_display.curval += 1
10960                 self._status_display.merges = len(self._task_queues.merge)
10961                 self._schedule()
10962
10963         def _do_merge_exit(self, merge):
10964                 pkg = merge.merge.pkg
10965                 if merge.returncode != os.EX_OK:
10966                         settings = merge.merge.settings
10967                         build_dir = settings.get("PORTAGE_BUILDDIR")
10968                         build_log = settings.get("PORTAGE_LOG_FILE")
10969
10970                         self._failed_pkgs.append(self._failed_pkg(
10971                                 build_dir=build_dir, build_log=build_log,
10972                                 pkg=pkg,
10973                                 returncode=merge.returncode))
10974                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10975
10976                         self._status_display.failed = len(self._failed_pkgs)
10977                         return
10978
10979                 self._task_complete(pkg)
10980                 pkg_to_replace = merge.merge.pkg_to_replace
10981                 if pkg_to_replace is not None:
10982                         # When a package is replaced, mark its uninstall
10983                         # task complete (if any).
10984                         uninst_hash_key = \
10985                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10986                         self._task_complete(uninst_hash_key)
10987
10988                 if pkg.installed:
10989                         return
10990
10991                 self._restart_if_necessary(pkg)
10992
10993                 # Call mtimedb.commit() after each merge so that
10994                 # --resume still works after being interrupted
10995                 # by reboot, sigkill or similar.
10996                 mtimedb = self._mtimedb
10997                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10998                 if not mtimedb["resume"]["mergelist"]:
10999                         del mtimedb["resume"]
11000                 mtimedb.commit()
11001
11002         def _build_exit(self, build):
11003                 if build.returncode == os.EX_OK:
11004                         self.curval += 1
11005                         merge = PackageMerge(merge=build)
11006                         if not build.build_opts.buildpkgonly and \
11007                                 build.pkg in self._deep_system_deps:
11008                                 # Since dependencies on system packages are frequently
11009                                 # unspecified, merge them only when no builds are executing.
11010                                 self._merge_wait_queue.append(merge)
11011                                 merge.addStartListener(self._system_merge_started)
11012                         else:
11013                                 merge.addExitListener(self._merge_exit)
11014                                 self._task_queues.merge.add(merge)
11015                                 self._status_display.merges = len(self._task_queues.merge)
11016                 else:
11017                         settings = build.settings
11018                         build_dir = settings.get("PORTAGE_BUILDDIR")
11019                         build_log = settings.get("PORTAGE_LOG_FILE")
11020
11021                         self._failed_pkgs.append(self._failed_pkg(
11022                                 build_dir=build_dir, build_log=build_log,
11023                                 pkg=build.pkg,
11024                                 returncode=build.returncode))
11025                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11026
11027                         self._status_display.failed = len(self._failed_pkgs)
11028                         self._deallocate_config(build.settings)
11029                 self._jobs -= 1
11030                 self._status_display.running = self._jobs
11031                 self._schedule()
11032
11033         def _extract_exit(self, build):
11034                 self._build_exit(build)
11035
11036         def _task_complete(self, pkg):
11037                 self._completed_tasks.add(pkg)
11038                 self._unsatisfied_system_deps.discard(pkg)
11039                 self._choose_pkg_return_early = False
11040
11041         def _merge(self):
11042
11043                 self._add_prefetchers()
11044                 self._add_packages()
11045                 pkg_queue = self._pkg_queue
11046                 failed_pkgs = self._failed_pkgs
11047                 portage.locks._quiet = self._background
11048                 portage.elog._emerge_elog_listener = self._elog_listener
11049                 rval = os.EX_OK
11050
11051                 try:
11052                         self._main_loop()
11053                 finally:
11054                         self._main_loop_cleanup()
11055                         portage.locks._quiet = False
11056                         portage.elog._emerge_elog_listener = None
11057                         if failed_pkgs:
11058                                 rval = failed_pkgs[-1].returncode
11059
11060                 return rval
11061
11062         def _main_loop_cleanup(self):
11063                 del self._pkg_queue[:]
11064                 self._completed_tasks.clear()
11065                 self._deep_system_deps.clear()
11066                 self._unsatisfied_system_deps.clear()
11067                 self._choose_pkg_return_early = False
11068                 self._status_display.reset()
11069                 self._digraph = None
11070                 self._task_queues.fetch.clear()
11071
11072         def _choose_pkg(self):
11073                 """
11074                 Choose a task that has all of its dependencies satisfied.
11075                 """
11076
11077                 if self._choose_pkg_return_early:
11078                         return None
11079
11080                 if self._digraph is None:
11081                         if (self._jobs or self._task_queues.merge) and \
11082                                 not ("--nodeps" in self.myopts and \
11083                                 (self._max_jobs is True or self._max_jobs > 1)):
11084                                 self._choose_pkg_return_early = True
11085                                 return None
11086                         return self._pkg_queue.pop(0)
11087
11088                 if not (self._jobs or self._task_queues.merge):
11089                         return self._pkg_queue.pop(0)
11090
11091                 self._prune_digraph()
11092
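                      # Pick the first queued package whose deep dependencies do
                      # not include any merge that is still scheduled or running.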
11093                 chosen_pkg = None
11094                 later = set(self._pkg_queue)
11095                 for pkg in self._pkg_queue:
11096                         later.remove(pkg)
11097                         if not self._dependent_on_scheduled_merges(pkg, later):
11098                                 chosen_pkg = pkg
11099                                 break
11100
11101                 if chosen_pkg is not None:
11102                         self._pkg_queue.remove(chosen_pkg)
11103
11104                 if chosen_pkg is None:
11105                         # There's no point in searching for a package to
11106                         # choose until at least one of the existing jobs
11107                         # completes.
11108                         self._choose_pkg_return_early = True
11109
11110                 return chosen_pkg
11111
11112         def _dependent_on_scheduled_merges(self, pkg, later):
11113                 """
11114                 Traverse the subgraph of the given package's deep dependencies
11115                 to see if it contains any scheduled merges.
11116                 @param pkg: a package to check dependencies for
11117                 @type pkg: Package
11118                 @param later: packages for which dependence should be ignored,
11119                         since they will be merged later than pkg anyway and therefore
11120                         delaying the merge of pkg would not result in a more optimal
11121                         merge order
11122                 @type later: set
11123                 @rtype: bool
11124                 @returns: True if the package is dependent, False otherwise.
11125                 """
11126
11127                 graph = self._digraph
11128                 completed_tasks = self._completed_tasks
11129
11130                 dependent = False
11131                 traversed_nodes = set([pkg])
11132                 direct_deps = graph.child_nodes(pkg)
11133                 node_stack = direct_deps
11134                 direct_deps = frozenset(direct_deps)
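                      # Depth-first traversal: a node counts as a pending merge
                      # unless it is installed/nomerge, an uninstall outside the
                      # direct deps, already completed, or scheduled later anyway.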
11135                 while node_stack:
11136                         node = node_stack.pop()
11137                         if node in traversed_nodes:
11138                                 continue
11139                         traversed_nodes.add(node)
11140                         if not ((node.installed and node.operation == "nomerge") or \
11141                                 (node.operation == "uninstall" and \
11142                                 node not in direct_deps) or \
11143                                 node in completed_tasks or \
11144                                 node in later):
11145                                 dependent = True
11146                                 break
11147                         node_stack.extend(graph.child_nodes(node))
11148
11149                 return dependent
11150
11151         def _allocate_config(self, root):
11152                 """
11153                 Allocate a unique config instance for a task in order
11154                 to prevent interference between parallel tasks.
11155                 """
11156                 if self._config_pool[root]:
11157                         temp_settings = self._config_pool[root].pop()
11158                 else:
11159                         temp_settings = portage.config(clone=self.pkgsettings[root])
11160                 # For performance reasons, config.setcpv() isn't guaranteed to call
11161                 # config.reset(), so call it here to make sure all settings from the
11162                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11163                 temp_settings.reload()
11164                 temp_settings.reset()
11165                 return temp_settings
11166
11167         def _deallocate_config(self, settings):
11168                 self._config_pool[settings["ROOT"]].append(settings)
11169
11170         def _main_loop(self):
11171
11172                 # Only allow 1 job max if a restart is scheduled
11173                 # due to portage update.
11174                 if self._is_restart_scheduled() or \
11175                         self._opts_no_background.intersection(self.myopts):
11176                         self._set_max_jobs(1)
11177
11178                 merge_queue = self._task_queues.merge
11179
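                      # Keep scheduling new tasks for as long as _schedule()
                      # reports that packages remain to be scheduled.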
11180                 while self._schedule():
11181                         if self._poll_event_handlers:
11182                                 self._poll_loop()
11183
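                      # Everything has been scheduled; wait for the remaining
                      # jobs and merges to finish.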
11184                 while True:
11185                         self._schedule()
11186                         if not (self._jobs or merge_queue):
11187                                 break
11188                         if self._poll_event_handlers:
11189                                 self._poll_loop()
11190
11191         def _keep_scheduling(self):
11192                 return bool(self._pkg_queue and \
11193                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11194
11195         def _schedule_tasks(self):
11196
11197                 # When the number of jobs drops to zero, process all waiting merges.
11198                 if not self._jobs and self._merge_wait_queue:
11199                         for task in self._merge_wait_queue:
11200                                 task.addExitListener(self._merge_wait_exit_handler)
11201                                 self._task_queues.merge.add(task)
11202                         self._status_display.merges = len(self._task_queues.merge)
11203                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11204                         del self._merge_wait_queue[:]
11205
11206                 self._schedule_tasks_imp()
11207                 self._status_display.display()
11208
11209                 state_change = 0
11210                 for q in self._task_queues.values():
11211                         if q.schedule():
11212                                 state_change += 1
11213
11214                 # Cancel prefetchers if they're the only reason
11215                 # the main poll loop is still running.
11216                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11217                         not (self._jobs or self._task_queues.merge) and \
11218                         self._task_queues.fetch:
11219                         self._task_queues.fetch.clear()
11220                         state_change += 1
11221
11222                 if state_change:
11223                         self._schedule_tasks_imp()
11224                         self._status_display.display()
11225
11226                 return self._keep_scheduling()
11227
11228         def _job_delay(self):
11229                 """
11230                 @rtype: bool
11231                 @returns: True if job scheduling should be delayed, False otherwise.
11232                 """
11233
11234                 if self._jobs and self._max_load is not None:
11235
11236                         current_time = time.time()
11237
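                              # The delay grows with the number of running jobs
                              # (jobs ** _job_delay_exp) and is capped at
                              # _job_delay_max.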
11238                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11239                         if delay > self._job_delay_max:
11240                                 delay = self._job_delay_max
11241                         if (current_time - self._previous_job_start_time) < delay:
11242                                 return True
11243
11244                 return False
11245
11246         def _schedule_tasks_imp(self):
11247                 """
11248                 @rtype: bool
11249                 @returns: True if state changed, False otherwise.
11250                 """
11251
11252                 state_change = 0
11253
11254                 while True:
11255
11256                         if not self._keep_scheduling():
11257                                 return bool(state_change)
11258
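                              # Stop scheduling for now if no package can be chosen
                              # yet, merges are waiting to run, system deps are
                              # unsatisfied while jobs run, the job limit has been
                              # reached, or the job delay is in effect.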
11259                         if self._choose_pkg_return_early or \
11260                                 self._merge_wait_scheduled or \
11261                                 (self._jobs and self._unsatisfied_system_deps) or \
11262                                 not self._can_add_job() or \
11263                                 self._job_delay():
11264                                 return bool(state_change)
11265
11266                         pkg = self._choose_pkg()
11267                         if pkg is None:
11268                                 return bool(state_change)
11269
11270                         state_change += 1
11271
11272                         if not pkg.installed:
11273                                 self._pkg_count.curval += 1
11274
11275                         task = self._task(pkg)
11276
11277                         if pkg.installed:
11278                                 merge = PackageMerge(merge=task)
11279                                 merge.addExitListener(self._merge_exit)
11280                                 self._task_queues.merge.add(merge)
11281
11282                         elif pkg.built:
11283                                 self._jobs += 1
11284                                 self._previous_job_start_time = time.time()
11285                                 self._status_display.running = self._jobs
11286                                 task.addExitListener(self._extract_exit)
11287                                 self._task_queues.jobs.add(task)
11288
11289                         else:
11290                                 self._jobs += 1
11291                                 self._previous_job_start_time = time.time()
11292                                 self._status_display.running = self._jobs
11293                                 task.addExitListener(self._build_exit)
11294                                 self._task_queues.jobs.add(task)
11295
11296                 return bool(state_change)
11297
11298         def _task(self, pkg):
11299
11300                 pkg_to_replace = None
11301                 if pkg.operation != "uninstall":
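                              # Look up the currently installed package in the same
                              # slot, if any; this merge will replace it.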
11302                         vardb = pkg.root_config.trees["vartree"].dbapi
11303                         previous_cpv = vardb.match(pkg.slot_atom)
11304                         if previous_cpv:
11305                                 previous_cpv = previous_cpv.pop()
11306                                 pkg_to_replace = self._pkg(previous_cpv,
11307                                         "installed", pkg.root_config, installed=True)
11308
11309                 task = MergeListItem(args_set=self._args_set,
11310                         background=self._background, binpkg_opts=self._binpkg_opts,
11311                         build_opts=self._build_opts,
11312                         config_pool=self._ConfigPool(pkg.root,
11313                         self._allocate_config, self._deallocate_config),
11314                         emerge_opts=self.myopts,
11315                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11316                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11317                         pkg_to_replace=pkg_to_replace,
11318                         prefetcher=self._prefetchers.get(pkg),
11319                         scheduler=self._sched_iface,
11320                         settings=self._allocate_config(pkg.root),
11321                         statusMessage=self._status_msg,
11322                         world_atom=self._world_atom)
11323
11324                 return task
11325
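	# Emits a one-line "Failed to <action> <cpv> <preposition> <root>"
	# status message, followed by the location of the build log on a
	# second line when one can be located.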
11326         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11327                 pkg = failed_pkg.pkg
11328                 msg = "%s to %s %s" % \
11329                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11330                 if pkg.root != "/":
11331                         msg += " %s %s" % (preposition, pkg.root)
11332
11333                 log_path = self._locate_failure_log(failed_pkg)
11334                 if log_path is not None:
11335                         msg += ", Log file:"
11336                 self._status_msg(msg)
11337
11338                 if log_path is not None:
11339                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11340
11341         def _status_msg(self, msg):
11342                 """
11343                 Display a brief status message (no newlines) in the status display.
11344                 This is called by tasks to provide feedback to the user. It
11345                 delegates to the status display the responsibility of generating
11346                 \r and \n control characters, so that lines are created or erased
11347                 when necessary and appropriate.
11348
11349                 @type msg: str
11350                 @param msg: a brief status message (no newlines allowed)
11351                 """
11352                 if not self._background:
11353                         writemsg_level("\n")
11354                 self._status_display.displayMessage(msg)
11355
11356         def _save_resume_list(self):
11357                 """
11358                 Do this before verifying the ebuild Manifests since it might
11359                 be possible for the user to use --resume --skipfirst to get past
11360                 a non-essential package with a broken digest.
11361                 """
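		# Only Package entries with operation == "merge" are persisted;
		# other nodes are recalculated when the resume list is used.
		# Each entry is stored in list form, typically something like
		# ["ebuild", "/", "sys-apps/portage-2.2", "merge"] (illustrative
		# values only).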
11362                 mtimedb = self._mtimedb
11363                 mtimedb["resume"]["mergelist"] = [list(x) \
11364                         for x in self._mergelist \
11365                         if isinstance(x, Package) and x.operation == "merge"]
11366
11367                 mtimedb.commit()
11368
11369         def _calc_resume_list(self):
11370                 """
11371                 Use the current resume list to calculate a new one,
11372                 dropping any packages with unsatisfied deps.
11373                 @rtype: bool
11374                 @returns: True if successful, False otherwise.
11375                 """
11376                 print colorize("GOOD", "*** Resuming merge...")
11377
11378                 if self._show_list():
11379                         if "--tree" in self.myopts:
11380                                 portage.writemsg_stdout("\n" + \
11381                                         darkgreen("These are the packages that " + \
11382                                         "would be merged, in reverse order:\n\n"))
11383
11384                         else:
11385                                 portage.writemsg_stdout("\n" + \
11386                                         darkgreen("These are the packages that " + \
11387                                         "would be merged, in order:\n\n"))
11388
11389                 show_spinner = "--quiet" not in self.myopts and \
11390                         "--nodeps" not in self.myopts
11391
11392                 if show_spinner:
11393                         print "Calculating dependencies  ",
11394
11395                 myparams = create_depgraph_params(self.myopts, None)
11396                 success = False
11397                 e = None
11398                 try:
11399                         success, mydepgraph, dropped_tasks = resume_depgraph(
11400                                 self.settings, self.trees, self._mtimedb, self.myopts,
11401                                 myparams, self._spinner)
11402                 except depgraph.UnsatisfiedResumeDep, exc:
11403                         # rename variable to avoid python-3.0 error:
11404                         # SyntaxError: can not delete variable 'e' referenced in nested
11405                         #              scope
11406                         e = exc
11407                         mydepgraph = e.depgraph
11408                         dropped_tasks = set()
11409
11410                 if show_spinner:
11411                         print "\b\b... done!"
11412
11413                 if e is not None:
11414                         def unsatisfied_resume_dep_msg():
11415                                 mydepgraph.display_problems()
11416                                 out = portage.output.EOutput()
11417                                 out.eerror("One or more packages are either masked or " + \
11418                                         "have missing dependencies:")
11419                                 out.eerror("")
11420                                 indent = "  "
11421                                 show_parents = set()
11422                                 for dep in e.value:
11423                                         if dep.parent in show_parents:
11424                                                 continue
11425                                         show_parents.add(dep.parent)
11426                                         if dep.atom is None:
11427                                                 out.eerror(indent + "Masked package:")
11428                                                 out.eerror(2 * indent + str(dep.parent))
11429                                                 out.eerror("")
11430                                         else:
11431                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11432                                                 out.eerror(2 * indent + str(dep.parent))
11433                                                 out.eerror("")
11434                                 msg = "The resume list contains packages " + \
11435                                         "that are either masked or have " + \
11436                                         "unsatisfied dependencies. " + \
11437                                         "Please restart/continue " + \
11438                                         "the operation manually, or use --skipfirst " + \
11439                                         "to skip the first package in the list and " + \
11440                                         "any other packages that may be " + \
11441                                         "masked or have missing dependencies."
11442                                 for line in textwrap.wrap(msg, 72):
11443                                         out.eerror(line)
11444                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11445                         return False
11446
11447                 if success and self._show_list():
11448                         mylist = mydepgraph.altlist()
11449                         if mylist:
11450                                 if "--tree" in self.myopts:
11451                                         mylist.reverse()
11452                                 mydepgraph.display(mylist, favorites=self._favorites)
11453
11454                 if not success:
11455                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11456                         return False
11457                 mydepgraph.display_problems()
11458
11459                 mylist = mydepgraph.altlist()
11460                 mydepgraph.break_refs(mylist)
11461                 mydepgraph.break_refs(dropped_tasks)
11462                 self._mergelist = mylist
11463                 self._set_digraph(mydepgraph.schedulerGraph())
11464
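		# Report every merge task that was dropped because of unsatisfied
		# dependencies, both on the console and through the elog system,
		# and record it so that the final summary counts it as a failure.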
11465                 msg_width = 75
11466                 for task in dropped_tasks:
11467                         if not (isinstance(task, Package) and task.operation == "merge"):
11468                                 continue
11469                         pkg = task
11470                         msg = "emerge --keep-going:" + \
11471                                 " %s" % (pkg.cpv,)
11472                         if pkg.root != "/":
11473                                 msg += " for %s" % (pkg.root,)
11474                         msg += " dropped due to unsatisfied dependency."
11475                         for line in textwrap.wrap(msg, msg_width):
11476                                 eerror(line, phase="other", key=pkg.cpv)
11477                         settings = self.pkgsettings[pkg.root]
11478                         # Ensure that log collection from $T is disabled inside
11479                         # elog_process(), since any logs that might exist are
11480                         # not valid here.
11481                         settings.pop("T", None)
11482                         portage.elog.elog_process(pkg.cpv, settings)
11483                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11484
11485                 return True
11486
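	# The merge list is only redisplayed when the user asked for it via
	# --ask, --tree or --verbose and did not also request --quiet.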
11487         def _show_list(self):
11488                 myopts = self.myopts
11489                 if "--quiet" not in myopts and \
11490                         ("--ask" in myopts or "--tree" in myopts or \
11491                         "--verbose" in myopts):
11492                         return True
11493                 return False
11494
11495         def _world_atom(self, pkg):
11496                 """
11497                 Add the package to the world file, but only if
11498                 it's supposed to be added. Otherwise, do nothing.
11499                 """
11500
11501                 if set(("--buildpkgonly", "--fetchonly",
11502                         "--fetch-all-uri",
11503                         "--oneshot", "--onlydeps",
11504                         "--pretend")).intersection(self.myopts):
11505                         return
11506
11507                 if pkg.root != self.target_root:
11508                         return
11509
11510                 args_set = self._args_set
11511                 if not args_set.findAtomForPackage(pkg):
11512                         return
11513
11514                 logger = self._logger
11515                 pkg_count = self._pkg_count
11516                 root_config = pkg.root_config
11517                 world_set = root_config.sets["world"]
11518                 world_locked = False
11519                 if hasattr(world_set, "lock"):
11520                         world_set.lock()
11521                         world_locked = True
11522
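		# With the world file locked (when the set supports locking),
		# re-load it from disk before adding the atom so that changes
		# made by other processes are not clobbered.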
11523                 try:
11524                         if hasattr(world_set, "load"):
11525                                 world_set.load() # maybe it's changed on disk
11526
11527                         atom = create_world_atom(pkg, args_set, root_config)
11528                         if atom:
11529                                 if hasattr(world_set, "add"):
11530                                         self._status_msg(('Recording %s in "world" ' + \
11531                                                 'favorites file...') % atom)
11532                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11533                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11534                                         world_set.add(atom)
11535                                 else:
11536                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11537                                                 (atom,), level=logging.WARN, noiselevel=-1)
11538                 finally:
11539                         if world_locked:
11540                                 world_set.unlock()
11541
11542         def _pkg(self, cpv, type_name, root_config, installed=False):
11543                 """
11544                 Get a package instance from the cache, or create a new
11545                 one if necessary. Raises KeyError from aux_get if it
11546                 fails for some reason (package does not exist or is
11547                 corrupt).
11548                 """
11549                 operation = "merge"
11550                 if installed:
11551                         operation = "nomerge"
11552
11553                 if self._digraph is not None:
11554                         # Reuse existing instance when available.
11555                         pkg = self._digraph.get(
11556                                 (type_name, root_config.root, cpv, operation))
11557                         if pkg is not None:
11558                                 return pkg
11559
11560                 tree_type = depgraph.pkg_tree_map[type_name]
11561                 db = root_config.trees[tree_type].dbapi
11562                 db_keys = list(self.trees[root_config.root][
11563                         tree_type].dbapi._aux_cache_keys)
11564                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11565                 pkg = Package(cpv=cpv, metadata=metadata,
11566                         root_config=root_config, installed=installed)
11567                 if type_name == "ebuild":
11568                         settings = self.pkgsettings[root_config.root]
11569                         settings.setcpv(pkg)
11570                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11571                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11572
11573                 return pkg
11574
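# MetadataRegen regenerates the ebuild metadata cache through the
# PollScheduler framework: it walks the requested category/package
# names, spawns an EbuildMetadataPhase for every ebuild whose cached
# metadata is stale, and optionally feeds each result to a consumer
# callback.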
11575 class MetadataRegen(PollScheduler):
11576
11577         def __init__(self, portdb, cp_iter=None, consumer=None,
11578                 max_jobs=None, max_load=None):
11579                 PollScheduler.__init__(self)
11580                 self._portdb = portdb
11581                 self._global_cleanse = False
11582                 if cp_iter is None:
11583                         cp_iter = self._iter_every_cp()
11584                         # We can globally cleanse stale cache only if we
11585                         # iterate over every single cp.
11586                         self._global_cleanse = True
11587                 self._cp_iter = cp_iter
11588                 self._consumer = consumer
11589
11590                 if max_jobs is None:
11591                         max_jobs = 1
11592
11593                 self._max_jobs = max_jobs
11594                 self._max_load = max_load
11595                 self._sched_iface = self._sched_iface_class(
11596                         register=self._register,
11597                         schedule=self._schedule_wait,
11598                         unregister=self._unregister)
11599
11600                 self._valid_pkgs = set()
11601                 self._cp_set = set()
11602                 self._process_iter = self._iter_metadata_processes()
11603                 self.returncode = os.EX_OK
11604                 self._error_count = 0
11605
11606         def _iter_every_cp(self):
11607                 every_cp = self._portdb.cp_all()
11608                 every_cp.sort(reverse=True)
11609                 try:
11610                         while True:
11611                                 yield every_cp.pop()
11612                 except IndexError:
11613                         pass
11614
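	# Yields an EbuildMetadataPhase for every cpv whose cache entry
	# cannot be reused; entries that are still valid are handed directly
	# to the consumer callback without spawning a job.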
11615         def _iter_metadata_processes(self):
11616                 portdb = self._portdb
11617                 valid_pkgs = self._valid_pkgs
11618                 cp_set = self._cp_set
11619                 consumer = self._consumer
11620
11621                 for cp in self._cp_iter:
11622                         cp_set.add(cp)
11623                         portage.writemsg_stdout("Processing %s\n" % cp)
11624                         cpv_list = portdb.cp_list(cp)
11625                         for cpv in cpv_list:
11626                                 valid_pkgs.add(cpv)
11627                                 ebuild_path, repo_path = portdb.findname2(cpv)
11628                                 metadata, st, emtime = portdb._pull_valid_cache(
11629                                         cpv, ebuild_path, repo_path)
11630                                 if metadata is not None:
11631                                         if consumer is not None:
11632                                                 consumer(cpv, ebuild_path,
11633                                                         repo_path, metadata)
11634                                         continue
11635
11636                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11637                                         ebuild_mtime=emtime,
11638                                         metadata_callback=portdb._metadata_callback,
11639                                         portdb=portdb, repo_path=repo_path,
11640                                         settings=portdb.doebuild_settings)
11641
11642         def run(self):
11643
11644                 portdb = self._portdb
11645                 from portage.cache.cache_errors import CacheError
11646                 dead_nodes = {}
11647
11648                 while self._schedule():
11649                         self._poll_loop()
11650
11651                 while self._jobs:
11652                         self._poll_loop()
11653
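		# Collect the cache keys that are candidates for removal.  With a
		# global cleanse every key in each tree's auxdb is a candidate;
		# otherwise only keys belonging to the processed category/package
		# names are considered.  Keys that still have a matching ebuild
		# are discarded below, and whatever remains is deleted as stale.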
11654                 if self._global_cleanse:
11655                         for mytree in portdb.porttrees:
11656                                 try:
11657                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11658                                 except CacheError, e:
11659                                         portage.writemsg("Error listing cache entries for " + \
11660                                                 "'%s': %s, continuing...\n" % (mytree, e),
11661                                                 noiselevel=-1)
11662                                         del e
11663                                         dead_nodes = None
11664                                         break
11665                 else:
11666                         cp_set = self._cp_set
11667                         cpv_getkey = portage.cpv_getkey
11668                         for mytree in portdb.porttrees:
11669                                 try:
11670                                         dead_nodes[mytree] = set(cpv for cpv in \
11671                                                 portdb.auxdb[mytree].iterkeys() \
11672                                                 if cpv_getkey(cpv) in cp_set)
11673                                 except CacheError, e:
11674                                         portage.writemsg("Error listing cache entries for " + \
11675                                                 "'%s': %s, continuing...\n" % (mytree, e),
11676                                                 noiselevel=-1)
11677                                         del e
11678                                         dead_nodes = None
11679                                         break
11680
11681                 if dead_nodes:
11682                         for y in self._valid_pkgs:
11683                                 for mytree in portdb.porttrees:
11684                                         if portdb.findname2(y, mytree=mytree)[0]:
11685                                                 dead_nodes[mytree].discard(y)
11686
11687                         for mytree, nodes in dead_nodes.iteritems():
11688                                 auxdb = portdb.auxdb[mytree]
11689                                 for y in nodes:
11690                                         try:
11691                                                 del auxdb[y]
11692                                         except (KeyError, CacheError):
11693                                                 pass
11694
11695         def _schedule_tasks(self):
11696                 """
11697                 @rtype: bool
11698                 @returns: True if there may be remaining tasks to schedule,
11699                         False otherwise.
11700                 """
11701                 while self._can_add_job():
11702                         try:
11703                                 metadata_process = self._process_iter.next()
11704                         except StopIteration:
11705                                 return False
11706
11707                         self._jobs += 1
11708                         metadata_process.scheduler = self._sched_iface
11709                         metadata_process.addExitListener(self._metadata_exit)
11710                         metadata_process.start()
11711                 return True
11712
11713         def _metadata_exit(self, metadata_process):
11714                 self._jobs -= 1
11715                 if metadata_process.returncode != os.EX_OK:
11716                         self.returncode = 1
11717                         self._error_count += 1
11718                         self._valid_pkgs.discard(metadata_process.cpv)
11719                         portage.writemsg("Error processing %s, continuing...\n" % \
11720                                 (metadata_process.cpv,), noiselevel=-1)
11721
11722                 if self._consumer is not None:
11723                         # On failure, still notify the consumer (in this case the metadata
11724                         # argument is None).
11725                         self._consumer(metadata_process.cpv,
11726                                 metadata_process.ebuild_path,
11727                                 metadata_process.repo_path,
11728                                 metadata_process.metadata)
11729
11730                 self._schedule()
11731
11732 class UninstallFailure(portage.exception.PortageException):
11733         """
11734         An instance of this class is raised by unmerge() when
11735         an uninstallation fails.
11736         """
11737         status = 1
11738         def __init__(self, *pargs):
11739                 portage.exception.PortageException.__init__(self, pargs)
11740                 if pargs:
11741                         self.status = pargs[0]
11742
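# unmerge() resolves the given files/atoms against the installed-package
# database, classifies every match as selected, protected or omitted,
# prints the usual summary, and then, unless --pretend was given, calls
# portage.unmerge() for each selected package.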
11743 def unmerge(root_config, myopts, unmerge_action,
11744         unmerge_files, ldpath_mtimes, autoclean=0,
11745         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11746         scheduler=None, writemsg_level=portage.util.writemsg_level):
11747
11748         quiet = "--quiet" in myopts
11749         settings = root_config.settings
11750         sets = root_config.sets
11751         vartree = root_config.trees["vartree"]
11752         candidate_catpkgs=[]
11753         global_unmerge=0
11754         xterm_titles = "notitles" not in settings.features
11755         out = portage.output.EOutput()
11756         pkg_cache = {}
11757         db_keys = list(vartree.dbapi._aux_cache_keys)
11758
11759         def _pkg(cpv):
11760                 pkg = pkg_cache.get(cpv)
11761                 if pkg is None:
11762                         pkg = Package(cpv=cpv, installed=True,
11763                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11764                                 root_config=root_config,
11765                                 type_name="installed")
11766                         pkg_cache[cpv] = pkg
11767                 return pkg
11768
11769         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11770         try:
11771                 # At least the parent needs to exist for the lock file.
11772                 portage.util.ensure_dirs(vdb_path)
11773         except portage.exception.PortageException:
11774                 pass
11775         vdb_lock = None
11776         try:
11777                 if os.access(vdb_path, os.W_OK):
11778                         vdb_lock = portage.locks.lockdir(vdb_path)
11779                 realsyslist = sets["system"].getAtoms()
11780                 syslist = []
11781                 for x in realsyslist:
11782                         mycp = portage.dep_getkey(x)
11783                         if mycp in settings.getvirtuals():
11784                                 providers = []
11785                                 for provider in settings.getvirtuals()[mycp]:
11786                                         if vartree.dbapi.match(provider):
11787                                                 providers.append(provider)
11788                                 if len(providers) == 1:
11789                                         syslist.extend(providers)
11790                         else:
11791                                 syslist.append(mycp)
11792         
11793                 mysettings = portage.config(clone=settings)
11794         
11795                 if not unmerge_files:
11796                         if unmerge_action == "unmerge":
11797                                 print
11798                                 print bold("emerge unmerge") + " can only be used with specific package names"
11799                                 print
11800                                 return 0
11801                         else:
11802                                 global_unmerge = 1
11803         
11804                 localtree = vartree
11805                 # process all arguments and add all
11806                 # valid db entries to candidate_catpkgs
11807                 if global_unmerge:
11808                         if not unmerge_files:
11809                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11810                 else:
11811                         #we've got command-line arguments
11812                         if not unmerge_files:
11813                                 print "\nNo packages to unmerge have been provided.\n"
11814                                 return 0
11815                         for x in unmerge_files:
11816                                 arg_parts = x.split('/')
11817                                 if x[0] not in [".","/"] and \
11818                                         arg_parts[-1][-7:] != ".ebuild":
11819                                         #possible cat/pkg or dep; treat as such
11820                                         candidate_catpkgs.append(x)
11821                                 elif unmerge_action in ["prune","clean"]:
11822                                         print "\n!!! Prune and clean do not accept individual" + \
11823                                                 " ebuilds as arguments;\n    skipping.\n"
11824                                         continue
11825                                 else:
11826                                         # it appears that the user is specifying an installed
11827                                         # ebuild and we're in "unmerge" mode, so it's ok.
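					# Translate the path into a "=cat/pkg-ver" atom:
					# strip a trailing .ebuild component and require the
					# result to live inside the installed-package
					# database (vdb_path) before the category/package-
					# version part is extracted.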
11828                                         if not os.path.exists(x):
11829                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11830                                                 return 0
11831         
11832                                         absx   = os.path.abspath(x)
11833                                         sp_absx = absx.split("/")
11834                                         if sp_absx[-1][-7:] == ".ebuild":
11835                                                 del sp_absx[-1]
11836                                                 absx = "/".join(sp_absx)
11837         
11838                                         sp_absx_len = len(sp_absx)
11839         
11840                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11841                                         vdb_len  = len(vdb_path)
11842         
11843                                         sp_vdb     = vdb_path.split("/")
11844                                         sp_vdb_len = len(sp_vdb)
11845         
11846                                         if not os.path.exists(absx+"/CONTENTS"):
11847                                                 print "!!! Not a valid db dir: "+str(absx)
11848                                                 return 0
11849         
11850                                         if sp_absx_len <= sp_vdb_len:
11851                                                 # The path is shorter, so it can't be inside the vdb.
11852                                                 print sp_absx
11853                                                 print absx
11854                                                 print "\n!!!",x,"cannot be inside "+ \
11855                                                         vdb_path+"; aborting.\n"
11856                                                 return 0
11857         
11858                                         for idx in range(0,sp_vdb_len):
11859                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11860                                                         print sp_absx
11861                                                         print absx
11862                                                         print "\n!!!", x, "is not inside "+\
11863                                                                 vdb_path+"; aborting.\n"
11864                                                         return 0
11865         
11866                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11867                                         candidate_catpkgs.append(
11868                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11869         
11870                 newline=""
11871                 if (not "--quiet" in myopts):
11872                         newline="\n"
11873                 if settings["ROOT"] != "/":
11874                         writemsg_level(darkgreen(newline+ \
11875                                 ">>> Using system located in ROOT tree %s\n" % \
11876                                 settings["ROOT"]))
11877
11878                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11879                         not ("--quiet" in myopts):
11880                         writemsg_level(darkgreen(newline+\
11881                                 ">>> These are the packages that would be unmerged:\n"))
11882
11883                 # Preservation of order is required for --depclean and --prune so
11884                 # that dependencies are respected. Use all_selected to eliminate
11885                 # duplicate packages since the same package may be selected by
11886                 # multiple atoms.
11887                 pkgmap = []
11888                 all_selected = set()
11889                 for x in candidate_catpkgs:
11890                         # cycle through all our candidate deps and determine
11891                         # what will and will not get unmerged
11892                         try:
11893                                 mymatch = vartree.dbapi.match(x)
11894                         except portage.exception.AmbiguousPackageName, errpkgs:
11895                                 print "\n\n!!! The short ebuild name \"" + \
11896                                         x + "\" is ambiguous.  Please specify"
11897                                 print "!!! one of the following fully-qualified " + \
11898                                         "ebuild names instead:\n"
11899                                 for i in errpkgs[0]:
11900                                         print "    " + green(i)
11901                                 print
11902                                 sys.exit(1)
11903         
11904                         if not mymatch and x[0] not in "<>=~":
11905                                 mymatch = localtree.dep_match(x)
11906                         if not mymatch:
11907                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11908                                         (x, unmerge_action), noiselevel=-1)
11909                                 continue
11910
11911                         pkgmap.append(
11912                                 {"protected": set(), "selected": set(), "omitted": set()})
11913                         mykey = len(pkgmap) - 1
11914                         if unmerge_action=="unmerge":
11915                                         for y in mymatch:
11916                                                 if y not in all_selected:
11917                                                         pkgmap[mykey]["selected"].add(y)
11918                                                         all_selected.add(y)
11919                         elif unmerge_action == "prune":
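				# For --prune keep only the best installed version: start
				# with the first match and promote any later match that is
				# a newer version or has a higher counter within the same
				# slot (i.e. was merged more recently); on a slot collision
				# the higher counter wins.  The winner is protected and
				# every other match is selected for unmerge.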
11920                                 if len(mymatch) == 1:
11921                                         continue
11922                                 best_version = mymatch[0]
11923                                 best_slot = vartree.getslot(best_version)
11924                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11925                                 for mypkg in mymatch[1:]:
11926                                         myslot = vartree.getslot(mypkg)
11927                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11928                                         if (myslot == best_slot and mycounter > best_counter) or \
11929                                                 mypkg == portage.best([mypkg, best_version]):
11930                                                 if myslot == best_slot:
11931                                                         if mycounter < best_counter:
11932                                                                 # On slot collision, keep the one with the
11933                                                                 # highest counter since it is the most
11934                                                                 # recently installed.
11935                                                                 continue
11936                                                 best_version = mypkg
11937                                                 best_slot = myslot
11938                                                 best_counter = mycounter
11939                                 pkgmap[mykey]["protected"].add(best_version)
11940                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11941                                         if mypkg != best_version and mypkg not in all_selected)
11942                                 all_selected.update(pkgmap[mykey]["selected"])
11943                         else:
11944                                 # unmerge_action == "clean"
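				# Build a slot -> {counter: cpv} map over all installed
				# versions of this package.  In every slot the highest
				# counter (the most recently merged version) is protected,
				# versions that did not match the atom are protected as
				# well, and the remaining older versions are selected for
				# unmerge.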
11945                                 slotmap={}
11946                                 for mypkg in mymatch:
11947                                         if unmerge_action == "clean":
11948                                                 myslot = localtree.getslot(mypkg)
11949                                         else:
11950                                                 # since we're pruning, we don't care about slots
11951                                                 # and put all the pkgs in together
11952                                                 myslot = 0
11953                                         if myslot not in slotmap:
11954                                                 slotmap[myslot] = {}
11955                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11956
11957                                 for mypkg in vartree.dbapi.cp_list(
11958                                         portage.dep_getkey(mymatch[0])):
11959                                         myslot = vartree.getslot(mypkg)
11960                                         if myslot not in slotmap:
11961                                                 slotmap[myslot] = {}
11962                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11963
11964                                 for myslot in slotmap:
11965                                         counterkeys = slotmap[myslot].keys()
11966                                         if not counterkeys:
11967                                                 continue
11968                                         counterkeys.sort()
11969                                         pkgmap[mykey]["protected"].add(
11970                                                 slotmap[myslot][counterkeys[-1]])
11971                                         del counterkeys[-1]
11972
11973                                         for counter in counterkeys[:]:
11974                                                 mypkg = slotmap[myslot][counter]
11975                                                 if mypkg not in mymatch:
11976                                                         counterkeys.remove(counter)
11977                                                         pkgmap[mykey]["protected"].add(
11978                                                                 slotmap[myslot][counter])
11979
11980                                         #be pretty and get them in order of merge:
11981                                         for ckey in counterkeys:
11982                                                 mypkg = slotmap[myslot][ckey]
11983                                                 if mypkg not in all_selected:
11984                                                         pkgmap[mykey]["selected"].add(mypkg)
11985                                                         all_selected.add(mypkg)
11986                                         # ok, now the last-merged package
11987                                         # is protected, and the rest are selected
11988                 numselected = len(all_selected)
11989                 if global_unmerge and not numselected:
11990                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11991                         return 0
11992         
11993                 if not numselected:
11994                         portage.writemsg_stdout(
11995                                 "\n>>> No packages selected for removal by " + \
11996                                 unmerge_action + "\n")
11997                         return 0
11998         finally:
11999                 if vdb_lock:
12000                         vartree.dbapi.flush_cache()
12001                         portage.locks.unlockdir(vdb_lock)
12002         
12003         from portage.sets.base import EditablePackageSet
12004         
12005         # generate a list of package sets that are directly or indirectly listed in "world",
12006         # as there is no persistent list of "installed" sets
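	# Expand nested set references breadth-first: starting from "world",
	# keep appending any @set names found among the non-atom entries of
	# the sets discovered so far, until a pass adds nothing new.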
12007         installed_sets = ["world"]
12008         stop = False
12009         pos = 0
12010         while not stop:
12011                 stop = True
12012                 pos = len(installed_sets)
12013                 for s in installed_sets[pos - 1:]:
12014                         if s not in sets:
12015                                 continue
12016                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12017                         if candidates:
12018                                 stop = False
12019                                 installed_sets += candidates
12020         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12021         del stop, pos
12022
12023         # we don't want to unmerge packages that are still listed in user-editable package sets
12024         # listed in "world" as they would be remerged on the next update of "world" or the 
12025         # relevant package sets.
12026         unknown_sets = set()
12027         for cp in xrange(len(pkgmap)):
12028                 for cpv in pkgmap[cp]["selected"].copy():
12029                         try:
12030                                 pkg = _pkg(cpv)
12031                         except KeyError:
12032                                 # It could have been uninstalled
12033                                 # by a concurrent process.
12034                                 continue
12035
12036                         if unmerge_action != "clean" and \
12037                                 root_config.root == "/" and \
12038                                 portage.match_from_list(
12039                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12040                                 msg = ("Not unmerging package %s since there is no valid " + \
12041                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12042                                 for line in textwrap.wrap(msg, 75):
12043                                         out.eerror(line)
12044                                 # adjust pkgmap so the display output is correct
12045                                 pkgmap[cp]["selected"].remove(cpv)
12046                                 all_selected.remove(cpv)
12047                                 pkgmap[cp]["protected"].add(cpv)
12048                                 continue
12049
12050                         parents = []
12051                         for s in installed_sets:
12052                                 # skip sets that the user requested to unmerge, and skip world 
12053                                 # unless we're unmerging a package set (as the package would be 
12054                                 # removed from "world" later on)
12055                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12056                                         continue
12057
12058                                 if s not in sets:
12059                                         if s in unknown_sets:
12060                                                 continue
12061                                         unknown_sets.add(s)
12062                                         out = portage.output.EOutput()
12063                                         out.eerror(("Unknown set '@%s' in " + \
12064                                                 "%svar/lib/portage/world_sets") % \
12065                                                 (s, root_config.root))
12066                                         continue
12067
12068                                 # only check instances of EditablePackageSet as other classes are generally used for
12069                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12070                                 # user can't do much about them anyway)
12071                                 if isinstance(sets[s], EditablePackageSet):
12072
12073                                         # This is derived from a snippet of code in the
12074                                         # depgraph._iter_atoms_for_pkg() method.
12075                                         for atom in sets[s].iterAtomsForPackage(pkg):
12076                                                 inst_matches = vartree.dbapi.match(atom)
12077                                                 inst_matches.reverse() # descending order
12078                                                 higher_slot = None
12079                                                 for inst_cpv in inst_matches:
12080                                                         try:
12081                                                                 inst_pkg = _pkg(inst_cpv)
12082                                                         except KeyError:
12083                                                                 # It could have been uninstalled
12084                                                                 # by a concurrent process.
12085                                                                 continue
12086
12087                                                         if inst_pkg.cp != atom.cp:
12088                                                                 continue
12089                                                         if pkg >= inst_pkg:
12090                                                                 # This is descending order, and we're not
12091                                                                 # interested in any versions <= pkg given.
12092                                                                 break
12093                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12094                                                                 higher_slot = inst_pkg
12095                                                                 break
12096                                                 if higher_slot is None:
12097                                                         parents.append(s)
12098                                                         break
12099                         if parents:
12100                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12101                                 #print colorize("WARN", "but still listed in the following package sets:")
12102                                 #print "    %s\n" % ", ".join(parents)
12103                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12104                                 print colorize("WARN", "still referenced by the following package sets:")
12105                                 print "    %s\n" % ", ".join(parents)
12106                                 # adjust pkgmap so the display output is correct
12107                                 pkgmap[cp]["selected"].remove(cpv)
12108                                 all_selected.remove(cpv)
12109                                 pkgmap[cp]["protected"].add(cpv)
12110         
12111         del installed_sets
12112
12113         numselected = len(all_selected)
12114         if not numselected:
12115                 writemsg_level(
12116                         "\n>>> No packages selected for removal by " + \
12117                         unmerge_action + "\n")
12118                 return 0
12119
12120         # Unmerge order only matters in some cases
12121         if not ordered:
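		# Collapse the per-atom entries into one entry per category/package
		# so that the preview below lists each package only once, sorted by
		# name.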
12122                 unordered = {}
12123                 for d in pkgmap:
12124                         selected = d["selected"]
12125                         if not selected:
12126                                 continue
12127                         cp = portage.cpv_getkey(iter(selected).next())
12128                         cp_dict = unordered.get(cp)
12129                         if cp_dict is None:
12130                                 cp_dict = {}
12131                                 unordered[cp] = cp_dict
12132                                 for k in d:
12133                                         cp_dict[k] = set()
12134                         for k, v in d.iteritems():
12135                                 cp_dict[k].update(v)
12136                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12137
12138         for x in xrange(len(pkgmap)):
12139                 selected = pkgmap[x]["selected"]
12140                 if not selected:
12141                         continue
12142                 for mytype, mylist in pkgmap[x].iteritems():
12143                         if mytype == "selected":
12144                                 continue
12145                         mylist.difference_update(all_selected)
12146                 cp = portage.cpv_getkey(iter(selected).next())
12147                 for y in localtree.dep_match(cp):
12148                         if y not in pkgmap[x]["omitted"] and \
12149                                 y not in pkgmap[x]["selected"] and \
12150                                 y not in pkgmap[x]["protected"] and \
12151                                 y not in all_selected:
12152                                 pkgmap[x]["omitted"].add(y)
12153                 if global_unmerge and not pkgmap[x]["selected"]:
12154                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12155                         continue
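		# Warn loudly (terminal bell plus an EMERGE_WARNING_DELAY
		# countdown) when a package from the system profile is about to be
		# unmerged and no other version of it is protected or omitted.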
12156                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12157                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12158                                 "'%s' is part of your system profile.\n" % cp),
12159                                 level=logging.WARNING, noiselevel=-1)
12160                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12161                                 "be damaging to your system.\n\n"),
12162                                 level=logging.WARNING, noiselevel=-1)
12163                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12164                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12165                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12166                 if not quiet:
12167                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12168                 else:
12169                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12170                 for mytype in ["selected","protected","omitted"]:
12171                         if not quiet:
12172                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12173                         if pkgmap[x][mytype]:
12174                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12175                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12176                                 for pn, ver, rev in sorted_pkgs:
12177                                         if rev == "r0":
12178                                                 myversion = ver
12179                                         else:
12180                                                 myversion = ver + "-" + rev
12181                                         if mytype == "selected":
12182                                                 writemsg_level(
12183                                                         colorize("UNMERGE_WARN", myversion + " "),
12184                                                         noiselevel=-1)
12185                                         else:
12186                                                 writemsg_level(
12187                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12188                         else:
12189                                 writemsg_level("none ", noiselevel=-1)
12190                         if not quiet:
12191                                 writemsg_level("\n", noiselevel=-1)
12192                 if quiet:
12193                         writemsg_level("\n", noiselevel=-1)
12194
12195         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12196                 " packages are slated for removal.\n")
12197         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12198                         " and " + colorize("GOOD", "'omitted'") + \
12199                         " packages will not be removed.\n\n")
12200
12201         if "--pretend" in myopts:
12202                 #we're done... return
12203                 return 0
12204         if "--ask" in myopts:
12205                 if userquery("Would you like to unmerge these packages?")=="No":
12206                         # enter pretend mode for correct formatting of results
12207                         myopts["--pretend"] = True
12208                         print
12209                         print "Quitting."
12210                         print
12211                         return 0
12212         #the real unmerging begins, after a short delay....
12213         if clean_delay and not autoclean:
12214                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12215
12216         for x in xrange(len(pkgmap)):
12217                 for y in pkgmap[x]["selected"]:
12218                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12219                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12220                         mysplit = y.split("/")
12221                         #unmerge...
12222                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12223                                 mysettings, unmerge_action not in ["clean","prune"],
12224                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12225                                 scheduler=scheduler)
12226
12227                         if retval != os.EX_OK:
12228                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12229                                 if raise_on_error:
12230                                         raise UninstallFailure(retval)
12231                                 sys.exit(retval)
12232                         else:
12233                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12234                                         sets["world"].cleanPackage(vartree.dbapi, y)
12235                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12236         if clean_world and hasattr(sets["world"], "remove"):
12237                 for s in root_config.setconfig.active:
12238                         sets["world"].remove(SETPREFIX+s)
12239         return 1
12240
12241 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12242
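	# Regenerate the GNU info directory index for every info directory
	# whose mtime differs from the one recorded during the previous run.
	# install-info is invoked once per info file; "already exists" and
	# "no info dir entry" warnings are ignored, anything else is counted
	# as an error and reported.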
12243         if os.path.exists("/usr/bin/install-info"):
12244                 out = portage.output.EOutput()
12245                 regen_infodirs=[]
12246                 for z in infodirs:
12247                         if z=='':
12248                                 continue
12249                         inforoot=normpath(root+z)
12250                         if os.path.isdir(inforoot):
12251                                 infomtime = long(os.stat(inforoot).st_mtime)
12252                                 if inforoot not in prev_mtimes or \
12253                                         prev_mtimes[inforoot] != infomtime:
12254                                                 regen_infodirs.append(inforoot)
12255
12256                 if not regen_infodirs:
12257                         portage.writemsg_stdout("\n")
12258                         out.einfo("GNU info directory index is up-to-date.")
12259                 else:
12260                         portage.writemsg_stdout("\n")
12261                         out.einfo("Regenerating GNU info directory index...")
12262
12263                         dir_extensions = ("", ".gz", ".bz2")
12264                         icount=0
12265                         badcount=0
12266                         errmsg = ""
12267                         for inforoot in regen_infodirs:
12268                                 if inforoot=='':
12269                                         continue
12270
12271                                 if not os.path.isdir(inforoot) or \
12272                                         not os.access(inforoot, os.W_OK):
12273                                         continue
12274
12275                                 file_list = os.listdir(inforoot)
12276                                 file_list.sort()
12277                                 dir_file = os.path.join(inforoot, "dir")
12278                                 moved_old_dir = False
12279                                 processed_count = 0
12280                                 for x in file_list:
12281                                         if x.startswith(".") or \
12282                                                 os.path.isdir(os.path.join(inforoot, x)):
12283                                                 continue
12284                                         if x.startswith("dir"):
12285                                                 skip = False
12286                                                 for ext in dir_extensions:
12287                                                         if x == "dir" + ext or \
12288                                                                 x == "dir" + ext + ".old":
12289                                                                 skip = True
12290                                                                 break
12291                                                 if skip:
12292                                                         continue
12293                                         if processed_count == 0:
12294                                                 for ext in dir_extensions:
12295                                                         try:
12296                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12297                                                                 moved_old_dir = True
12298                                                         except EnvironmentError, e:
12299                                                                 if e.errno != errno.ENOENT:
12300                                                                         raise
12301                                                                 del e
12302                                         processed_count += 1
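                                              # Force the C locale so install-info emits the English
                                              # warning strings matched below; getstatusoutput captures
                                              # stdout and stderr combined, so harmless warnings can be
                                              # told apart from real errors.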
12303                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12304                                         existsstr="already exists, for file `"
12305                                         if myso!="":
12306                                                 if re.search(existsstr,myso):
12307                                                         # Already exists... Don't increment the count for this.
12308                                                         pass
12309                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12310                                                         # This info file doesn't contain a DIR-header: install-info produces this
12311                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12312                                                         # Don't increment the count for this.
12313                                                         pass
12314                                                 else:
12315                                                         badcount=badcount+1
12316                                                         errmsg += myso + "\n"
12317                                         icount=icount+1
12318
12319                                 if moved_old_dir and not os.path.exists(dir_file):
12320                                         # We didn't generate a new dir file, so put the old file
12321                                         # back where it was originally found.
12322                                         for ext in dir_extensions:
12323                                                 try:
12324                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12325                                                 except EnvironmentError, e:
12326                                                         if e.errno != errno.ENOENT:
12327                                                                 raise
12328                                                         del e
12329
12330                                 # Clean up dir.old cruft so that it doesn't prevent
12331                                 # unmerge of otherwise empty directories.
12332                                 for ext in dir_extensions:
12333                                         try:
12334                                                 os.unlink(dir_file + ext + ".old")
12335                                         except EnvironmentError, e:
12336                                                 if e.errno != errno.ENOENT:
12337                                                         raise
12338                                                 del e
12339
12340                                 #update mtime so we can potentially avoid regenerating.
12341                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12342
12343                         if badcount:
12344                                 out.eerror("Processed %d info files; %d errors." % \
12345                                         (icount, badcount))
12346                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12347                         else:
12348                                 if icount > 0:
12349                                         out.einfo("Processed %d info files." % (icount,))
12350
12351
12352 def display_news_notification(root_config, myopts):
12353         target_root = root_config.root
12354         trees = root_config.trees
12355         settings = trees["vartree"].settings
12356         portdb = trees["porttree"].dbapi
12357         vardb = trees["vartree"].dbapi
12358         NEWS_PATH = os.path.join("metadata", "news")
12359         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12360         newsReaderDisplay = False
12361         update = "--pretend" not in myopts
12362
12363         for repo in portdb.getRepositories():
12364                 unreadItems = checkUpdatedNewsItems(
12365                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12366                 if unreadItems:
12367                         if not newsReaderDisplay:
12368                                 newsReaderDisplay = True
12369                                 print
12370                         print colorize("WARN", " * IMPORTANT:"),
12371                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12372                         
12373         
12374         if newsReaderDisplay:
12375                 print colorize("WARN", " *"),
12376                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12377                 print
12378
12379 def display_preserved_libs(vardbapi):
12380         MAX_DISPLAY = 3
12381
12382         # Ensure the registry is consistent with existing files.
12383         vardbapi.plib_registry.pruneNonExisting()
12384
12385         if vardbapi.plib_registry.hasEntries():
12386                 print
12387                 print colorize("WARN", "!!!") + " existing preserved libs:"
12388                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12389                 linkmap = vardbapi.linkmap
12390                 consumer_map = {}
12391                 owners = {}
12392                 linkmap_broken = False
12393
12394                 try:
12395                         linkmap.rebuild()
12396                 except portage.exception.CommandNotFound, e:
12397                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12398                                 level=logging.ERROR, noiselevel=-1)
12399                         del e
12400                         linkmap_broken = True
12401                 else:
12402                         search_for_owners = set()
12403                         for cpv in plibdata:
12404                                 internal_plib_keys = set(linkmap._obj_key(f) \
12405                                         for f in plibdata[cpv])
12406                                 for f in plibdata[cpv]:
12407                                         if f in consumer_map:
12408                                                 continue
12409                                         consumers = []
12410                                         for c in linkmap.findConsumers(f):
12411                                                 # Filter out any consumers that are also preserved libs
12412                                                 # belonging to the same package as the provider.
12413                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12414                                                         consumers.append(c)
12415                                         consumers.sort()
12416                                         consumer_map[f] = consumers
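                                              # Remember one consumer beyond the display limit so the
                                              # report below can distinguish "exactly one more consumer"
                                              # from "used by %d other files".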
12417                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12418
12419                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12420
12421                 for cpv in plibdata:
12422                         print colorize("WARN", ">>>") + " package: %s" % cpv
12423                         samefile_map = {}
12424                         for f in plibdata[cpv]:
12425                                 obj_key = linkmap._obj_key(f)
12426                                 alt_paths = samefile_map.get(obj_key)
12427                                 if alt_paths is None:
12428                                         alt_paths = set()
12429                                         samefile_map[obj_key] = alt_paths
12430                                 alt_paths.add(f)
12431
12432                         for alt_paths in samefile_map.itervalues():
12433                                 alt_paths = sorted(alt_paths)
12434                                 for p in alt_paths:
12435                                         print colorize("WARN", " * ") + " - %s" % (p,)
12436                                 f = alt_paths[0]
12437                                 consumers = consumer_map.get(f, [])
12438                                 for c in consumers[:MAX_DISPLAY]:
12439                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12440                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12441                                 if len(consumers) == MAX_DISPLAY + 1:
12442                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12443                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12444                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12445                                 elif len(consumers) > MAX_DISPLAY:
12446                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12447                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12448
12449
12450 def _flush_elog_mod_echo():
12451         """
12452         Dump the mod_echo output now so that our other
12453         notifications are shown last.
12454         @rtype: bool
12455         @returns: True if messages were shown, False otherwise.
12456         """
12457         messages_shown = False
12458         try:
12459                 from portage.elog import mod_echo
12460         except ImportError:
12461                 pass # happens during downgrade to a version without the module
12462         else:
12463                 messages_shown = bool(mod_echo._items)
12464                 mod_echo.finalize()
12465         return messages_shown
12466
12467 def post_emerge(root_config, myopts, mtimedb, retval):
12468         """
12469         Misc. things to run at the end of a merge session.
12470         
12471         Update Info Files
12472         Update Config Files
12473         Update News Items
12474         Commit mtimeDB
12475         Display preserved libs warnings
12476         Exit Emerge
12477
12478         @param root_config: A RootConfig instance providing access to the target ROOT's package databases
12479         @type root_config: RootConfig
12480         @param mtimedb: The mtimeDB to store data needed across merge invocations
12481         @type mtimedb: MtimeDB class instance
12482         @param retval: Emerge's return value
12483         @type retval: Int
12484         @rtype: None
12485         @returns:
12486         1.  Calls sys.exit(retval)
12487         """
12488
12489         target_root = root_config.root
12490         trees = { target_root : root_config.trees }
12491         vardbapi = trees[target_root]["vartree"].dbapi
12492         settings = vardbapi.settings
12493         info_mtimes = mtimedb["info"]
12494
12495         # Load the most current variables from ${ROOT}/etc/profile.env
12496         settings.unlock()
12497         settings.reload()
12498         settings.regenerate()
12499         settings.lock()
12500
12501         config_protect = settings.get("CONFIG_PROTECT","").split()
12502         infodirs = settings.get("INFOPATH","").split(":") + \
12503                 settings.get("INFODIR","").split(":")
12504
12505         os.chdir("/")
12506
12507         if retval == os.EX_OK:
12508                 exit_msg = " *** exiting successfully."
12509         else:
12510                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12511         emergelog("notitles" not in settings.features, exit_msg)
12512
12513         _flush_elog_mod_echo()
12514
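              # PORTAGE_COUNTER_HASH presumably holds a digest of the installed-package
              # COUNTER values recorded earlier in this run; if it still matches the
              # current vardb then no packages were merged or unmerged (see below).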
12515         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12516         if "--pretend" in myopts or (counter_hash is not None and \
12517                 counter_hash == vardbapi._counter_hash()):
12518                 display_news_notification(root_config, myopts)
12519                 # If vdb state has not changed then there's nothing else to do.
12520                 sys.exit(retval)
12521
12522         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12523         portage.util.ensure_dirs(vdb_path)
12524         vdb_lock = None
12525         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
12526                 vdb_lock = portage.locks.lockdir(vdb_path)
12527
12528         if vdb_lock:
12529                 try:
12530                         if "noinfo" not in settings.features:
12531                                 chk_updated_info_files(target_root,
12532                                         infodirs, info_mtimes, retval)
12533                         mtimedb.commit()
12534                 finally:
12535                         if vdb_lock:
12536                                 portage.locks.unlockdir(vdb_lock)
12537
12538         chk_updated_cfg_files(target_root, config_protect)
12539         
12540         display_news_notification(root_config, myopts)
12541         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
12542                 display_preserved_libs(vardbapi)
12543
12544         sys.exit(retval)
12545
12546
12547 def chk_updated_cfg_files(target_root, config_protect):
12548         if config_protect:
12549                 #number of directories with some protect files in them
12550                 procount=0
12551                 for x in config_protect:
12552                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12553                         if not os.access(x, os.W_OK):
12554                                 # Avoid Permission denied errors generated
12555                                 # later by `find`.
12556                                 continue
12557                         try:
12558                                 mymode = os.lstat(x).st_mode
12559                         except OSError:
12560                                 continue
12561                         if stat.S_ISLNK(mymode):
12562                                 # We want to treat it like a directory if it
12563                                 # is a symlink to an existing directory.
12564                                 try:
12565                                         real_mode = os.stat(x).st_mode
12566                                         if stat.S_ISDIR(real_mode):
12567                                                 mymode = real_mode
12568                                 except OSError:
12569                                         pass
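                              # Pending updates for protected files are written as
                              # ._cfg????_<name> alongside the original; search for them,
                              # pruning hidden directories and skipping editor backups.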
12570                         if stat.S_ISDIR(mymode):
12571                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12572                         else:
12573                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12574                                         os.path.split(x.rstrip(os.path.sep))
12575                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12576                         a = commands.getstatusoutput(mycommand)
12577                         if a[0] != 0:
12578                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12579                                 sys.stderr.flush()
12580                                 # Show the error message alone, sending stdout to /dev/null.
12581                                 os.system(mycommand + " 1>/dev/null")
12582                         else:
12583                                 files = a[1].split('\0')
12584                                 # split always produces an empty string as the last element
12585                                 if files and not files[-1]:
12586                                         del files[-1]
12587                                 if files:
12588                                         procount += 1
12589                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12590                                         if stat.S_ISDIR(mymode):
12591                                                  print "%d config files in '%s' need updating." % \
12592                                                         (len(files), x)
12593                                         else:
12594                                                  print "config file '%s' needs updating." % x
12595
12596                 if procount:
12597                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12598                                 " section of the " + bold("emerge")
12599                         print " "+yellow("*")+" man page to learn how to update config files."
12600
12601 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12602         update=False):
12603         """
12604         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12605         Returns the number of unread (yet relevant) items.
12606         
12607         @param portdb: a portage tree database
12608         @type portdb: portdbapi
12609         @param vardb: an installed package database
12610         @type vardb: vardbapi
12611         @param NEWS_PATH:
12612         @type NEWS_PATH:
12613         @param UNREAD_PATH:
12614         @type UNREAD_PATH:
12615         @param repo_id:
12616         @type repo_id:
12617         @rtype: Integer
12618         @returns:
12619         1.  The number of unread but relevant news items.
12620         
12621         """
12622         from portage.news import NewsManager
12623         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12624         return manager.getUnreadItems( repo_id, update=update )
12625
12626 def insert_category_into_atom(atom, category):
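              # For example, inserting category "sys-apps" into ">=portage-2.1" yields
              # ">=sys-apps/portage-2.1"; an atom with no word characters returns None.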
12627         alphanum = re.search(r'\w', atom)
12628         if alphanum:
12629                 ret = atom[:alphanum.start()] + "%s/" % category + \
12630                         atom[alphanum.start():]
12631         else:
12632                 ret = None
12633         return ret
12634
12635 def is_valid_package_atom(x):
12636         if "/" not in x:
12637                 alphanum = re.search(r'\w', x)
12638                 if alphanum:
12639                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12640         return portage.isvalidatom(x)
12641
12642 def show_blocker_docs_link():
12643         print
12644         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12645         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12646         print
12647         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12648         print
12649
12650 def show_mask_docs():
12651         print "For more information, see the MASKED PACKAGES section in the emerge"
12652         print "man page or refer to the Gentoo Handbook."
12653
12654 def action_sync(settings, trees, mtimedb, myopts, myaction):
12655         xterm_titles = "notitles" not in settings.features
12656         emergelog(xterm_titles, " === sync")
12657         myportdir = settings.get("PORTDIR", None)
12658         out = portage.output.EOutput()
12659         if not myportdir:
12660                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12661                 sys.exit(1)
12662         if myportdir[-1]=="/":
12663                 myportdir=myportdir[:-1]
12664         try:
12665                 st = os.stat(myportdir)
12666         except OSError:
12667                 st = None
12668         if st is None:
12669                 print ">>>",myportdir,"not found, creating it."
12670                 os.makedirs(myportdir,0755)
12671                 st = os.stat(myportdir)
12672
12673         spawn_kwargs = {}
12674         spawn_kwargs["env"] = settings.environ()
12675         if 'usersync' in settings.features and \
12676                 portage.data.secpass >= 2 and \
12677                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12678                 st.st_gid != os.getgid() and st.st_mode & 0070):
12679                 try:
12680                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12681                 except KeyError:
12682                         pass
12683                 else:
12684                         # Drop privileges when syncing, in order to match
12685                         # existing uid/gid settings.
12686                         spawn_kwargs["uid"]    = st.st_uid
12687                         spawn_kwargs["gid"]    = st.st_gid
12688                         spawn_kwargs["groups"] = [st.st_gid]
12689                         spawn_kwargs["env"]["HOME"] = homedir
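                              # Start from a group-writable umask (0002); if PORTDIR itself
                              # is not group-writable, mask out group write as well so newly
                              # synced files match the existing permissions.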
12690                         umask = 0002
12691                         if not st.st_mode & 0020:
12692                                 umask = umask | 0020
12693                         spawn_kwargs["umask"] = umask
12694
12695         syncuri = settings.get("SYNC", "").strip()
12696         if not syncuri:
12697                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12698                         noiselevel=-1, level=logging.ERROR)
12699                 return 1
12700
12701         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12702         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12703
12704         os.umask(0022)
12705         dosyncuri = syncuri
12706         updatecache_flg = False
12707         if myaction == "metadata":
12708                 print "skipping sync"
12709                 updatecache_flg = True
12710         elif ".git" in vcs_dirs:
12711                 # Update existing git repository, and ignore the syncuri. We are
12712                 # going to trust the user and assume that the user is in the branch
12713                 # that he/she wants updated. We'll let the user manage branches with
12714                 # git directly.
12715                 if portage.process.find_binary("git") is None:
12716                         msg = ["Command not found: git",
12717                         "Type \"emerge dev-util/git\" to enable git support."]
12718                         for l in msg:
12719                                 writemsg_level("!!! %s\n" % l,
12720                                         level=logging.ERROR, noiselevel=-1)
12721                         return 1
12722                 msg = ">>> Starting git pull in %s..." % myportdir
12723                 emergelog(xterm_titles, msg )
12724                 writemsg_level(msg + "\n")
12725                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12726                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12727                 if exitcode != os.EX_OK:
12728                         msg = "!!! git pull error in %s." % myportdir
12729                         emergelog(xterm_titles, msg)
12730                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12731                         return exitcode
12732                 msg = ">>> Git pull in %s successful" % myportdir
12733                 emergelog(xterm_titles, msg)
12734                 writemsg_level(msg + "\n")
12735                 exitcode = git_sync_timestamps(settings, myportdir)
12736                 if exitcode == os.EX_OK:
12737                         updatecache_flg = True
12738         elif syncuri[:8]=="rsync://":
12739                 for vcs_dir in vcs_dirs:
12740                         writemsg_level(("!!! %s appears to be under revision " + \
12741                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12742                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12743                         return 1
12744                 if not os.path.exists("/usr/bin/rsync"):
12745                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12746                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12747                         sys.exit(1)
12748                 mytimeout=180
12749
12750                 rsync_opts = []
12751                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12752                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12753                         rsync_opts.extend([
12754                                 "--recursive",    # Recurse directories
12755                                 "--links",        # Consider symlinks
12756                                 "--safe-links",   # Ignore links outside of tree
12757                                 "--perms",        # Preserve permissions
12758                                 "--times",        # Preserve mod times
12759                                 "--compress",     # Compress the data transmitted
12760                                 "--force",        # Force deletion on non-empty dirs
12761                                 "--whole-file",   # Don't do block transfers, only entire files
12762                                 "--delete",       # Delete files that aren't in the master tree
12763                                 "--stats",        # Show final statistics about what was transferred
12764                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12765                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12766                                 "--exclude=/local",       # Exclude local     from consideration
12767                                 "--exclude=/packages",    # Exclude packages  from consideration
12768                         ])
12769
12770                 else:
12771                         # The below validation is not needed when using the above hardcoded
12772                         # defaults.
12773
12774                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12775                         rsync_opts.extend(
12776                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12777                         for opt in ("--recursive", "--times"):
12778                                 if opt not in rsync_opts:
12779                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12780                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12781                                         rsync_opts.append(opt)
12782         
12783                         for exclude in ("distfiles", "local", "packages"):
12784                                 opt = "--exclude=/%s" % exclude
12785                                 if opt not in rsync_opts:
12786                                         portage.writemsg(yellow("WARNING:") + \
12787                                         " adding required option %s not included in "  % opt + \
12788                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12789                                         rsync_opts.append(opt)
12790         
12791                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12792                                 def rsync_opt_startswith(opt_prefix):
12793                                         for x in rsync_opts:
12794                                                 if x.startswith(opt_prefix):
12795                                                         return True
12796                                         return False
12797
12798                                 if not rsync_opt_startswith("--timeout="):
12799                                         rsync_opts.append("--timeout=%d" % mytimeout)
12800
12801                                 for opt in ("--compress", "--whole-file"):
12802                                         if opt not in rsync_opts:
12803                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12804                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12805                                                 rsync_opts.append(opt)
12806
12807                 if "--quiet" in myopts:
12808                         rsync_opts.append("--quiet")    # Shut up a lot
12809                 else:
12810                         rsync_opts.append("--verbose")  # Print filelist
12811
12812                 if "--verbose" in myopts:
12813                         rsync_opts.append("--progress")  # Progress meter for each file
12814
12815                 if "--debug" in myopts:
12816                         rsync_opts.append("--checksum") # Force checksum on all files
12817
12818                 # Real local timestamp file.
12819                 servertimestampfile = os.path.join(
12820                         myportdir, "metadata", "timestamp.chk")
12821
12822                 content = portage.util.grabfile(servertimestampfile)
12823                 mytimestamp = 0
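                      # metadata/timestamp.chk holds an RFC-2822 style UTC date, for
                      # example "Sat, 03 Jan 2009 18:45:01 +0000"; mytimestamp stays 0
                      # when the file is missing or cannot be parsed.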
12824                 if content:
12825                         try:
12826                                 mytimestamp = time.mktime(time.strptime(content[0],
12827                                         "%a, %d %b %Y %H:%M:%S +0000"))
12828                         except (OverflowError, ValueError):
12829                                 pass
12830                 del content
12831
12832                 try:
12833                         rsync_initial_timeout = \
12834                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12835                 except ValueError:
12836                         rsync_initial_timeout = 15
12837
12838                 try:
12839                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12840                 except SystemExit, e:
12841                         raise # Needed else can't exit
12842                 except:
12843                         maxretries=3 #default number of retries
12844
12845                 retries=0
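                      # Split the URI into an optional "user@" prefix, the hostname and
                      # an optional ":port" suffix; user_name keeps its trailing "@" and
                      # port keeps its leading ":" so they can be concatenated back
                      # verbatim when substituting a resolved IP below.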
12846                 user_name, hostname, port = re.split(
12847                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12848                 if port is None:
12849                         port=""
12850                 if user_name is None:
12851                         user_name=""
12852                 updatecache_flg=True
12853                 all_rsync_opts = set(rsync_opts)
12854                 extra_rsync_opts = shlex.split(
12855                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12856                 all_rsync_opts.update(extra_rsync_opts)
12857                 family = socket.AF_INET
12858                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12859                         family = socket.AF_INET
12860                 elif socket.has_ipv6 and \
12861                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12862                         family = socket.AF_INET6
12863                 ips=[]
12864                 SERVER_OUT_OF_DATE = -1
12865                 EXCEEDED_MAX_RETRIES = -2
12866                 while (1):
12867                         if ips:
12868                                 del ips[0]
12869                         if ips==[]:
12870                                 try:
12871                                         for addrinfo in socket.getaddrinfo(
12872                                                 hostname, None, family, socket.SOCK_STREAM):
12873                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12874                                                         # IPv6 addresses need to be enclosed in square brackets
12875                                                         ips.append("[%s]" % addrinfo[4][0])
12876                                                 else:
12877                                                         ips.append(addrinfo[4][0])
12878                                         from random import shuffle
12879                                         shuffle(ips)
12880                                 except SystemExit, e:
12881                                         raise # Needed else can't exit
12882                                 except Exception, e:
12883                                         print "Notice:",str(e)
12884                                         dosyncuri=syncuri
12885
12886                         if ips:
12887                                 try:
12888                                         dosyncuri = syncuri.replace(
12889                                                 "//" + user_name + hostname + port + "/",
12890                                                 "//" + user_name + ips[0] + port + "/", 1)
12891                                 except SystemExit, e:
12892                                         raise # Needed else can't exit
12893                                 except Exception, e:
12894                                         print "Notice:",str(e)
12895                                         dosyncuri=syncuri
12896
12897                         if (retries==0):
12898                                 if "--ask" in myopts:
12899                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12900                                                 print
12901                                                 print "Quitting."
12902                                                 print
12903                                                 sys.exit(0)
12904                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12905                                 if "--quiet" not in myopts:
12906                                         print ">>> Starting rsync with "+dosyncuri+"..."
12907                         else:
12908                                 emergelog(xterm_titles,
12909                                         ">>> Starting retry %d of %d with %s" % \
12910                                                 (retries,maxretries,dosyncuri))
12911                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12912
12913                         if mytimestamp != 0 and "--quiet" not in myopts:
12914                                 print ">>> Checking server timestamp ..."
12915
12916                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12917
12918                         if "--debug" in myopts:
12919                                 print rsynccommand
12920
12921                         exitcode = os.EX_OK
12922                         servertimestamp = 0
12923                         # Even if there's no timestamp available locally, fetch the
12924                         # timestamp anyway as an initial probe to verify that the server is
12925                         # responsive.  This protects us from hanging indefinitely on a
12926                         # connection attempt to an unresponsive server which rsync's
12927                         # --timeout option does not prevent.
12928                         if True:
12929                                 # Temporary file for remote server timestamp comparison.
12930                                 from tempfile import mkstemp
12931                                 fd, tmpservertimestampfile = mkstemp()
12932                                 os.close(fd)
12933                                 mycommand = rsynccommand[:]
12934                                 mycommand.append(dosyncuri.rstrip("/") + \
12935                                         "/metadata/timestamp.chk")
12936                                 mycommand.append(tmpservertimestampfile)
12937                                 content = None
12938                                 mypids = []
12939                                 try:
12940                                         def timeout_handler(signum, frame):
12941                                                 raise portage.exception.PortageException("timed out")
12942                                         signal.signal(signal.SIGALRM, timeout_handler)
12943                                         # Timeout here in case the server is unresponsive.  The
12944                                         # --timeout rsync option doesn't apply to the initial
12945                                         # connection attempt.
12946                                         if rsync_initial_timeout:
12947                                                 signal.alarm(rsync_initial_timeout)
12948                                         try:
12949                                                 mypids.extend(portage.process.spawn(
12950                                                         mycommand, env=settings.environ(), returnpid=True))
12951                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12952                                                 content = portage.grabfile(tmpservertimestampfile)
12953                                         finally:
12954                                                 if rsync_initial_timeout:
12955                                                         signal.alarm(0)
12956                                                 try:
12957                                                         os.unlink(tmpservertimestampfile)
12958                                                 except OSError:
12959                                                         pass
12960                                 except portage.exception.PortageException, e:
12961                                         # timed out
12962                                         print e
12963                                         del e
12964                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12965                                                 os.kill(mypids[0], signal.SIGTERM)
12966                                                 os.waitpid(mypids[0], 0)
12967                                         # This is the same code rsync uses for timeout.
12968                                         exitcode = 30
12969                                 else:
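                                              # os.waitpid() returned a 16-bit status word: exit code
                                              # in the high byte, terminating signal in the low byte.
                                              # Normalize it so a normal exit yields the plain return
                                              # code and death by a signal yields a value outside the
                                              # 0-255 range of ordinary rsync exit codes.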
12970                                         if exitcode != os.EX_OK:
12971                                                 if exitcode & 0xff:
12972                                                         exitcode = (exitcode & 0xff) << 8
12973                                                 else:
12974                                                         exitcode = exitcode >> 8
12975                                 if mypids:
12976                                         portage.process.spawned_pids.remove(mypids[0])
12977                                 if content:
12978                                         try:
12979                                                 servertimestamp = time.mktime(time.strptime(
12980                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12981                                         except (OverflowError, ValueError):
12982                                                 pass
12983                                 del mycommand, mypids, content
12984                         if exitcode == os.EX_OK:
12985                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12986                                         emergelog(xterm_titles,
12987                                                 ">>> Cancelling sync -- Already current.")
12988                                         print
12989                                         print ">>>"
12990                                         print ">>> Timestamps on the server and in the local repository are the same."
12991                                         print ">>> Cancelling all further sync action. You are already up to date."
12992                                         print ">>>"
12993                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12994                                         print ">>>"
12995                                         print
12996                                         sys.exit(0)
12997                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12998                                         emergelog(xterm_titles,
12999                                                 ">>> Server out of date: %s" % dosyncuri)
13000                                         print
13001                                         print ">>>"
13002                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13003                                         print ">>>"
13004                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13005                                         print ">>>"
13006                                         print
13007                                         exitcode = SERVER_OUT_OF_DATE
13008                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13009                                         # actual sync
13010                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13011                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
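                                              # Exit codes that end the retry loop: 0 is success, and
                                              # the others (such as syntax, file I/O, or interruption
                                              # by a signal) are not expected to be fixed by retrying.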
13012                                         if exitcode in [0,1,3,4,11,14,20,21]:
13013                                                 break
13014                         elif exitcode in [1,3,4,11,14,20,21]:
13015                                 break
13016                         else:
13017                                 # Code 2 indicates protocol incompatibility, which is expected
13018                                 # for servers with protocol < 29 that don't support
13019                                 # --prune-empty-directories.  Retry for a server that supports
13020                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13021                                 pass
13022
13023                         retries=retries+1
13024
13025                         if retries<=maxretries:
13026                                 print ">>> Retrying..."
13027                                 time.sleep(11)
13028                         else:
13029                                 # over retries
13030                                 # exit loop
13031                                 updatecache_flg=False
13032                                 exitcode = EXCEEDED_MAX_RETRIES
13033                                 break
13034
13035                 if (exitcode==0):
13036                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13037                 elif exitcode == SERVER_OUT_OF_DATE:
13038                         sys.exit(1)
13039                 elif exitcode == EXCEEDED_MAX_RETRIES:
13040                         sys.stderr.write(
13041                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13042                         sys.exit(1)
13043                 elif (exitcode>0):
13044                         msg = []
13045                         if exitcode==1:
13046                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13047                                 msg.append("that your SYNC setting is correct.")
13048                                 msg.append("SYNC=" + settings["SYNC"])
13049                         elif exitcode==11:
13050                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13051                                 msg.append("this means your disk is full, but can be caused by corruption")
13052                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13053                                 msg.append("and try again after the problem has been fixed.")
13054                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13055                         elif exitcode==20:
13056                                 msg.append("Rsync was killed before it finished.")
13057                         else:
13058                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13059                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13060                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13061                                 msg.append("temporary problem unless complications exist with your network")
13062                                 msg.append("(and possibly your system's filesystem) configuration.")
13063                         for line in msg:
13064                                 out.eerror(line)
13065                         sys.exit(exitcode)
13066         elif syncuri[:6]=="cvs://":
13067                 if not os.path.exists("/usr/bin/cvs"):
13068                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13069                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13070                         sys.exit(1)
13071                 cvsroot=syncuri[6:]
13072                 cvsdir=os.path.dirname(myportdir)
13073                 if not os.path.exists(myportdir+"/CVS"):
13074                         #initial checkout
13075                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13076                         if os.path.exists(cvsdir+"/gentoo-x86"):
13077                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13078                                 sys.exit(1)
13079                         try:
13080                                 os.rmdir(myportdir)
13081                         except OSError, e:
13082                                 if e.errno != errno.ENOENT:
13083                                         sys.stderr.write(
13084                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13085                                         sys.exit(1)
13086                                 del e
13087                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13088                                 print "!!! cvs checkout error; exiting."
13089                                 sys.exit(1)
13090                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13091                 else:
13092                         #cvs update
13093                         print ">>> Starting cvs update with "+syncuri+"..."
13094                         retval = portage.process.spawn_bash(
13095                                 "cd %s; cvs -z0 -q update -dP" % \
13096                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13097                         if retval != os.EX_OK:
13098                                 sys.exit(retval)
13099                 dosyncuri = syncuri
13100         else:
13101                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13102                         noiselevel=-1, level=logging.ERROR)
13103                 return 1
13104
13105         if updatecache_flg and  \
13106                 myaction != "metadata" and \
13107                 "metadata-transfer" not in settings.features:
13108                 updatecache_flg = False
13109
13110         # Reload the whole config from scratch.
13111         settings, trees, mtimedb = load_emerge_config(trees=trees)
13112         root_config = trees[settings["ROOT"]]["root_config"]
13113         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13114
13115         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13116                 action_metadata(settings, portdb, myopts)
13117
13118         if portage._global_updates(trees, mtimedb["updates"]):
13119                 mtimedb.commit()
13120                 # Reload the whole config from scratch.
13121                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13122                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13123                 root_config = trees[settings["ROOT"]]["root_config"]
13124
13125         mybestpv = portdb.xmatch("bestmatch-visible",
13126                 portage.const.PORTAGE_PACKAGE_ATOM)
13127         mypvs = portage.best(
13128                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13129                 portage.const.PORTAGE_PACKAGE_ATOM))
13130
13131         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13132
13133         if myaction != "metadata":
13134                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13135                         retval = portage.process.spawn(
13136                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13137                                 dosyncuri], env=settings.environ())
13138                         if retval != os.EX_OK:
13139                                 print red(" * ")+bold("spawn failed for "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13140
13141         if mybestpv != mypvs and "--quiet" not in myopts:
13142                 print
13143                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13144                 print red(" * ")+"that you update portage now, before any other packages are updated."
13145                 print
13146                 print red(" * ")+"To update portage, run 'emerge portage' now."
13147                 print
13148         
13149         display_news_notification(root_config, myopts)
13150         return os.EX_OK
13151
13152 def git_sync_timestamps(settings, portdir):
13153         """
13154         Since git doesn't preserve timestamps, synchronize timestamps between
13155         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13156         for a given file as long as the file in the working tree is not modified
13157         (relative to HEAD).
13158         """
13159         cache_dir = os.path.join(portdir, "metadata", "cache")
13160         if not os.path.isdir(cache_dir):
13161                 return os.EX_OK
13162         writemsg_level(">>> Synchronizing timestamps...\n")
13163
13164         from portage.cache.cache_errors import CacheError
13165         try:
13166                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13167                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13168         except CacheError, e:
13169                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13170                         level=logging.ERROR, noiselevel=-1)
13171                 return 1
13172
13173         ec_dir = os.path.join(portdir, "eclass")
13174         try:
13175                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13176                         if f.endswith(".eclass"))
13177         except OSError, e:
13178                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13179                         level=logging.ERROR, noiselevel=-1)
13180                 return 1
13181
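              # Ask git which tracked files are modified relative to HEAD
              # (--diff-filter=M); cache entries for those ebuilds, and entries
              # referencing a modified eclass, are skipped below.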
13182         args = [portage.const.BASH_BINARY, "-c",
13183                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13184                 portage._shell_quote(portdir)]
13185         import subprocess
13186         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13187         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13188         rval = proc.wait()
13189         if rval != os.EX_OK:
13190                 return rval
13191
13192         modified_eclasses = set(ec for ec in ec_names \
13193                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13194
13195         updated_ec_mtimes = {}
13196
13197         for cpv in cache_db:
13198                 cpv_split = portage.catpkgsplit(cpv)
13199                 if cpv_split is None:
13200                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13201                                 level=logging.ERROR, noiselevel=-1)
13202                         continue
13203
13204                 cat, pn, ver, rev = cpv_split
13205                 cat, pf = portage.catsplit(cpv)
13206                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13207                 if relative_eb_path in modified_files:
13208                         continue
13209
13210                 try:
13211                         cache_entry = cache_db[cpv]
13212                         eb_mtime = cache_entry.get("_mtime_")
13213                         ec_mtimes = cache_entry.get("_eclasses_")
13214                 except KeyError:
13215                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13216                                 level=logging.ERROR, noiselevel=-1)
13217                         continue
13218                 except CacheError, e:
13219                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13220                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13221                         continue
13222
13223                 if eb_mtime is None:
13224                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13225                                 level=logging.ERROR, noiselevel=-1)
13226                         continue
13227
13228                 try:
13229                         eb_mtime = long(eb_mtime)
13230                 except ValueError:
13231                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13232                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13233                         continue
13234
13235                 if ec_mtimes is None:
13236                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13237                                 level=logging.ERROR, noiselevel=-1)
13238                         continue
13239
13240                 if modified_eclasses.intersection(ec_mtimes):
13241                         continue
13242
13243                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13244                 if missing_eclasses:
13245                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13246                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13247                                 noiselevel=-1)
13248                         continue
13249
13250                 eb_path = os.path.join(portdir, relative_eb_path)
13251                 try:
13252                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13253                 except OSError:
13254                         writemsg_level("!!! Missing ebuild: %s\n" % \
13255                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13256                         continue
13257
13258                 inconsistent = False
13259                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13260                         updated_mtime = updated_ec_mtimes.get(ec)
13261                         if updated_mtime is not None and updated_mtime != ec_mtime:
13262                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13263                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13264                                 inconsistent = True
13265                                 break
13266
13267                 if inconsistent:
13268                         continue
13269
13270                 if current_eb_mtime != eb_mtime:
13271                         os.utime(eb_path, (eb_mtime, eb_mtime))
13272
13273                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13274                         if ec in updated_ec_mtimes:
13275                                 continue
13276                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13277                         current_mtime = long(os.stat(ec_path).st_mtime)
13278                         if current_mtime != ec_mtime:
13279                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13280                         updated_ec_mtimes[ec] = ec_mtime
13281
13282         return os.EX_OK
13283
13284 def action_metadata(settings, portdb, myopts):
13285         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13286         old_umask = os.umask(0002)
13287         cachedir = os.path.normpath(settings.depcachedir)
13288         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13289                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13290                                         "/sys", "/tmp", "/usr",  "/var"]:
13291                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13292                         "ROOT DIRECTORY ON YOUR SYSTEM."
13293                 print >> sys.stderr, \
13294                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13295                 sys.exit(73)
13296         if not os.path.exists(cachedir):
13297                 os.mkdir(cachedir)
13298
13299         ec = portage.eclass_cache.cache(portdb.porttree_root)
13300         myportdir = os.path.realpath(settings["PORTDIR"])
13301         cm = settings.load_best_module("portdbapi.metadbmodule")(
13302                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13303
13304         from portage.cache import util
13305
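        # Progress reporter passed to mirror_cache() below: it yields every cpv
        # in the tree and prints a rough percentage to stdout while the cache is
        # being updated. It subclasses quiet_mirroring, which is otherwise silent.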
13306         class percentage_noise_maker(util.quiet_mirroring):
13307                 def __init__(self, dbapi):
13308                         self.dbapi = dbapi
13309                         self.cp_all = dbapi.cp_all()
13310                         l = len(self.cp_all)
13311                         self.call_update_min = 100000000
13312                         self.min_cp_all = l/100.0
13313                         self.count = 1
13314                         self.pstr = ''
13315
13316                 def __iter__(self):
13317                         for x in self.cp_all:
13318                                 self.count += 1
13319                                 if self.count > self.min_cp_all:
13320                                         self.call_update_min = 0
13321                                         self.count = 0
13322                                 for y in self.dbapi.cp_list(x):
13323                                         yield y
13324                         self.call_update_min = 0
13325
13326                 def update(self, *arg):
13327                         try:
13328                                 self.pstr = int(self.pstr) + 1
13329                         except ValueError:
13330                                 self.pstr = 1
13331                         sys.stdout.write("%s%i%%" % \
13332                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13333                         sys.stdout.flush()
13334                         self.call_update_min = 10000000
13335
13336                 def finish(self, *arg):
13337                         sys.stdout.write("\b\b\b\b100%\n")
13338                         sys.stdout.flush()
13339
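        # In --quiet mode, use a plain cpv generator together with the silent
        # quiet_mirroring handler; otherwise the percentage reporter above serves
        # as both the cpv source and the progress display.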
13340         if "--quiet" in myopts:
13341                 def quicky_cpv_generator(cp_all_list):
13342                         for x in cp_all_list:
13343                                 for y in portdb.cp_list(x):
13344                                         yield y
13345                 source = quicky_cpv_generator(portdb.cp_all())
13346                 noise_maker = portage.cache.util.quiet_mirroring()
13347         else:
13348                 noise_maker = source = percentage_noise_maker(portdb)
13349         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13350                 eclass_cache=ec, verbose_instance=noise_maker)
13351
13352         sys.stdout.flush()
13353         os.umask(old_umask)
13354
13355 def action_regen(settings, portdb, max_jobs, max_load):
13356         xterm_titles = "notitles" not in settings.features
13357         emergelog(xterm_titles, " === regen")
13358         # Regenerate cache entries.
13359         portage.writemsg_stdout("Regenerating cache entries...\n")
13360         try:
13361                 os.close(sys.stdin.fileno())
13362         except SystemExit, e:
13363                 raise # Needed else can't exit
13364         except:
13365                 pass
13366         sys.stdout.flush()
13367
13368         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13369         regen.run()
13370
13371         portage.writemsg_stdout("done!\n")
13372         return regen.returncode
13373
13374 def action_config(settings, trees, myopts, myfiles):
13375         if len(myfiles) != 1:
13376                 print red("!!! config can only take a single package atom at this time\n")
13377                 sys.exit(1)
13378         if not is_valid_package_atom(myfiles[0]):
13379                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13380                         noiselevel=-1)
13381                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13382                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13383                 sys.exit(1)
13384         print
13385         try:
13386                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13387         except portage.exception.AmbiguousPackageName, e:
13388                 # Multiple matches thrown from cpv_expand
13389                 pkgs = e.args[0]
13390         if len(pkgs) == 0:
13391                 print "No packages found.\n"
13392                 sys.exit(0)
13393         elif len(pkgs) > 1:
13394                 if "--ask" in myopts:
13395                         options = []
13396                         print "Please select a package to configure:"
13397                         idx = 0
13398                         for pkg in pkgs:
13399                                 idx += 1
13400                                 options.append(str(idx))
13401                                 print options[-1]+") "+pkg
13402                         print "X) Cancel"
13403                         options.append("X")
13404                         idx = userquery("Selection?", options)
13405                         if idx == "X":
13406                                 sys.exit(0)
13407                         pkg = pkgs[int(idx)-1]
13408                 else:
13409                         print "The following packages are available:"
13410                         for pkg in pkgs:
13411                                 print "* "+pkg
13412                         print "\nPlease use a specific atom or the --ask option."
13413                         sys.exit(1)
13414         else:
13415                 pkg = pkgs[0]
13416
13417         print
13418         if "--ask" in myopts:
13419                 if userquery("Ready to configure "+pkg+"?") == "No":
13420                         sys.exit(0)
13421         else:
13422                 print "Configuring " + pkg + "..."
13423         print
13424         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13425         mysettings = portage.config(clone=settings)
13426         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13427         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13428         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13429                 mysettings,
13430                 debug=debug, cleanup=True,
13431                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13432         if retval == os.EX_OK:
13433                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13434                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13435         print
13436
13437 def action_info(settings, trees, myopts, myfiles):
13438         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13439                 settings.profile_path, settings["CHOST"],
13440                 trees[settings["ROOT"]]["vartree"].dbapi)
13441         header_width = 65
13442         header_title = "System Settings"
13443         if myfiles:
13444                 print header_width * "="
13445                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13446         print header_width * "="
13447         print "System uname: "+platform.platform(aliased=1)
13448
13449         lastSync = portage.grabfile(os.path.join(
13450                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13451         print "Timestamp of tree:",
13452         if lastSync:
13453                 print lastSync[0]
13454         else:
13455                 print "Unknown"
13456
13457         output=commands.getstatusoutput("distcc --version")
13458         if not output[0]:
13459                 print str(output[1].split("\n",1)[0]),
13460                 if "distcc" in settings.features:
13461                         print "[enabled]"
13462                 else:
13463                         print "[disabled]"
13464
13465         output=commands.getstatusoutput("ccache -V")
13466         if not output[0]:
13467                 print str(output[1].split("\n",1)[0]),
13468                 if "ccache" in settings.features:
13469                         print "[enabled]"
13470                 else:
13471                         print "[disabled]"
13472
13473         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13474                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13475         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13476         myvars  = portage.util.unique_array(myvars)
13477         myvars.sort()
13478
13479         for x in myvars:
13480                 if portage.isvalidatom(x):
13481                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13482                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13483                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13484                         pkgs = []
13485                         for pn, ver, rev in pkg_matches:
13486                                 if rev != "r0":
13487                                         pkgs.append(ver + "-" + rev)
13488                                 else:
13489                                         pkgs.append(ver)
13490                         if pkgs:
13491                                 pkgs = ", ".join(pkgs)
13492                                 print "%-20s %s" % (x+":", pkgs)
13493                 else:
13494                         print "%-20s %s" % (x+":", "[NOT VALID]")
13495
13496         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13497
13498         if "--verbose" in myopts:
13499                 myvars=settings.keys()
13500         else:
13501                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13502                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13503                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13504                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13505
13506                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13507
13508         myvars = portage.util.unique_array(myvars)
13509         unset_vars = []
13510         myvars.sort()
13511         for x in myvars:
13512                 if x in settings:
13513                         if x != "USE":
13514                                 print '%s="%s"' % (x, settings[x])
13515                         else:
13516                                 use = set(settings["USE"].split())
13517                                 use_expand = settings["USE_EXPAND"].split()
13518                                 use_expand.sort()
13519                                 for varname in use_expand:
13520                                         flag_prefix = varname.lower() + "_"
13521                                         for f in list(use):
13522                                                 if f.startswith(flag_prefix):
13523                                                         use.remove(f)
13524                                 use = list(use)
13525                                 use.sort()
13526                                 print 'USE="%s"' % " ".join(use),
13527                                 for varname in use_expand:
13528                                         myval = settings.get(varname)
13529                                         if myval:
13530                                                 print '%s="%s"' % (varname, myval),
13531                                 print
13532                 else:
13533                         unset_vars.append(x)
13534         if unset_vars:
13535                 print "Unset:  "+", ".join(unset_vars)
13536         print
13537
13538         if "--debug" in myopts:
13539                 for x in dir(portage):
13540                         module = getattr(portage, x)
13541                         if "cvs_id_string" in dir(module):
13542                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13543
13544         # See if we can find any packages installed matching the strings
13545         # passed on the command line
13546         mypkgs = []
13547         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13548         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13549         for x in myfiles:
13550                 mypkgs.extend(vardb.match(x))
13551
13552         # If some packages were found...
13553         if mypkgs:
13554                 # Get our global settings (we only print stuff if it varies from
13555                 # the current config)
13556                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13557                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13558                 global_vals = {}
13559                 pkgsettings = portage.config(clone=settings)
13560
13561                 for myvar in mydesiredvars:
13562                         global_vals[myvar] = set(settings.get(myvar, "").split())
13563
13564                 # Loop through each package
13565                 # Only print settings if they differ from global settings
13566                 header_title = "Package Settings"
13567                 print header_width * "="
13568                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13569                 print header_width * "="
13570                 from portage.output import EOutput
13571                 out = EOutput()
13572                 for pkg in mypkgs:
13573                         # Get all package specific variables
13574                         auxvalues = vardb.aux_get(pkg, auxkeys)
13575                         valuesmap = {}
13576                         for i in xrange(len(auxkeys)):
13577                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13578                         diff_values = {}
13579                         for myvar in mydesiredvars:
13580                                 # If the package variable doesn't match the
13581                                 # current global variable, something has changed
13582                                 # so set diff_found so we know to print
13583                                 if valuesmap[myvar] != global_vals[myvar]:
13584                                         diff_values[myvar] = valuesmap[myvar]
13585                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13586                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13587                         pkgsettings.reset()
13588                         # If a matching ebuild is no longer available in the tree, maybe it
13589                         # would make sense to compare against the flags for the best
13590                         # available version with the same slot?
13591                         mydb = None
13592                         if portdb.cpv_exists(pkg):
13593                                 mydb = portdb
13594                         pkgsettings.setcpv(pkg, mydb=mydb)
13595                         if valuesmap["IUSE"].intersection(
13596                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13597                                 diff_values["USE"] = valuesmap["USE"]
13598                         # If a difference was found, print the info for
13599                         # this package.
13600                         if diff_values:
13601                                 # Print package info
13602                                 print "%s was built with the following:" % pkg
13603                                 for myvar in mydesiredvars + ["USE"]:
13604                                         if myvar in diff_values:
13605                                                 mylist = list(diff_values[myvar])
13606                                                 mylist.sort()
13607                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13608                                 print
13609                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13610                         ebuildpath = vardb.findname(pkg)
13611                         if not ebuildpath or not os.path.exists(ebuildpath):
13612                                 out.ewarn("No ebuild found for '%s'" % pkg)
13613                                 continue
13614                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13615                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13616                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13617                                 tree="vartree")
13618
13619 def action_search(root_config, myopts, myfiles, spinner):
13620         if not myfiles:
13621                 print "emerge: no search terms provided."
13622         else:
13623                 searchinstance = search(root_config,
13624                         spinner, "--searchdesc" in myopts,
13625                         "--quiet" not in myopts, "--usepkg" in myopts,
13626                         "--usepkgonly" in myopts)
13627                 for mysearch in myfiles:
13628                         try:
13629                                 searchinstance.execute(mysearch)
13630                         except re.error, comment:
13631                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13632                                 sys.exit(1)
13633                         searchinstance.output()
13634
13635 def action_depclean(settings, trees, ldpath_mtimes,
13636         myopts, action, myfiles, spinner):
13637         # Remove packages that are neither explicitly merged nor required as a
13638         # dependency of another package. The world file counts as explicit.
13639
13640         # Global depclean or prune operations are not very safe when there are
13641         # missing dependencies since it's unknown how badly incomplete
13642         # the dependency graph is, and we might accidentally remove packages
13643         # that should have been pulled into the graph. On the other hand, it's
13644         # relatively safe to ignore missing deps when only asked to remove
13645         # specific packages.
13646         allow_missing_deps = len(myfiles) > 0
13647
13648         msg = []
13649         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13650         msg.append("mistakes. Packages that are part of the world set will always\n")
13651         msg.append("be kept.  They can be manually added to this set with\n")
13652         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13653         msg.append("package.provided (see portage(5)) will be removed by\n")
13654         msg.append("depclean, even if they are part of the world set.\n")
13655         msg.append("\n")
13656         msg.append("As a safety measure, depclean will not remove any packages\n")
13657         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13658         msg.append("consequence, it is often necessary to run %s\n" % \
13659                 good("`emerge --update"))
13660         msg.append(good("--newuse --deep @system @world`") + \
13661                 " prior to depclean.\n")
13662
13663         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13664                 portage.writemsg_stdout("\n")
13665                 for x in msg:
13666                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13667
13668         xterm_titles = "notitles" not in settings.features
13669         myroot = settings["ROOT"]
13670         root_config = trees[myroot]["root_config"]
13671         getSetAtoms = root_config.setconfig.getSetAtoms
13672         vardb = trees[myroot]["vartree"].dbapi
13673
13674         required_set_names = ("system", "world")
13675         required_sets = {}
13676         set_args = []
13677
13678         for s in required_set_names:
13679                 required_sets[s] = InternalPackageSet(
13680                         initial_atoms=getSetAtoms(s))
13681
13682         
13683         # When removing packages, use a temporary version of world
13684         # which excludes packages that are intended to be eligible for
13685         # removal.
13686         world_temp_set = required_sets["world"]
13687         system_set = required_sets["system"]
13688
13689         if not system_set or not world_temp_set:
13690
13691                 if not system_set:
13692                         writemsg_level("!!! You have no system list.\n",
13693                                 level=logging.ERROR, noiselevel=-1)
13694
13695                 if not world_temp_set:
13696                         writemsg_level("!!! You have no world file.\n",
13697                                 level=logging.WARNING, noiselevel=-1)
13698
13699                 writemsg_level("!!! Proceeding is likely to " + \
13700                         "break your installation.\n",
13701                         level=logging.WARNING, noiselevel=-1)
13702                 if "--pretend" not in myopts:
13703                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13704
13705         if action == "depclean":
13706                 emergelog(xterm_titles, " >>> depclean")
13707
13708         import textwrap
13709         args_set = InternalPackageSet()
13710         if myfiles:
13711                 for x in myfiles:
13712                         if not is_valid_package_atom(x):
13713                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13714                                         level=logging.ERROR, noiselevel=-1)
13715                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13716                                 return
13717                         try:
13718                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13719                         except portage.exception.AmbiguousPackageName, e:
13720                                 msg = "The short ebuild name \"" + x + \
13721                                         "\" is ambiguous.  Please specify " + \
13722                                         "one of the following " + \
13723                                         "fully-qualified ebuild names instead:"
13724                                 for line in textwrap.wrap(msg, 70):
13725                                         writemsg_level("!!! %s\n" % (line,),
13726                                                 level=logging.ERROR, noiselevel=-1)
13727                                 for i in e[0]:
13728                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13729                                                 level=logging.ERROR, noiselevel=-1)
13730                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13731                                 return
13732                         args_set.add(atom)
13733                 matched_packages = False
13734                 for x in args_set:
13735                         if vardb.match(x):
13736                                 matched_packages = True
13737                                 break
13738                 if not matched_packages:
13739                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13740                                 action)
13741                         return
13742
13743         writemsg_level("\nCalculating dependencies  ")
13744         resolver_params = create_depgraph_params(myopts, "remove")
13745         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13746         vardb = resolver.trees[myroot]["vartree"].dbapi
13747
13748         if action == "depclean":
13749
13750                 if args_set:
13751                         # Pull in everything that's installed but not matched
13752                         # by an argument atom since we don't want to clean any
13753                         # package if something depends on it.
13754
13755                         world_temp_set.clear()
13756                         for pkg in vardb:
13757                                 spinner.update()
13758
13759                                 try:
13760                                         if args_set.findAtomForPackage(pkg) is None:
13761                                                 world_temp_set.add("=" + pkg.cpv)
13762                                                 continue
13763                                 except portage.exception.InvalidDependString, e:
13764                                         show_invalid_depstring_notice(pkg,
13765                                                 pkg.metadata["PROVIDE"], str(e))
13766                                         del e
13767                                         world_temp_set.add("=" + pkg.cpv)
13768                                         continue
13769
13770         elif action == "prune":
13771
13772                 # Pull in everything that's installed since we don't want
13773                 # to prune a package if something depends on it.
13774                 world_temp_set.clear()
13775                 world_temp_set.update(vardb.cp_all())
13776
13777                 if not args_set:
13778
13779                         # Try to prune everything that's slotted.
13780                         for cp in vardb.cp_all():
13781                                 if len(vardb.cp_list(cp)) > 1:
13782                                         args_set.add(cp)
13783
13784                 # Remove atoms from world that match installed packages
13785                 # that are also matched by argument atoms, but do not remove
13786                 # them if they match the highest installed version.
13787                 for pkg in vardb:
13788                         spinner.update()
13789                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13790                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13791                                 raise AssertionError("package expected in matches: " + \
13792                                         "cp = %s, cpv = %s matches = %s" % \
13793                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13794
13795                         highest_version = pkgs_for_cp[-1]
13796                         if pkg == highest_version:
13797                                 # pkg is the highest version
13798                                 world_temp_set.add("=" + pkg.cpv)
13799                                 continue
13800
13801                         if len(pkgs_for_cp) <= 1:
13802                                 raise AssertionError("more packages expected: " + \
13803                                         "cp = %s, cpv = %s matches = %s" % \
13804                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13805
13806                         try:
13807                                 if args_set.findAtomForPackage(pkg) is None:
13808                                         world_temp_set.add("=" + pkg.cpv)
13809                                         continue
13810                         except portage.exception.InvalidDependString, e:
13811                                 show_invalid_depstring_notice(pkg,
13812                                         pkg.metadata["PROVIDE"], str(e))
13813                                 del e
13814                                 world_temp_set.add("=" + pkg.cpv)
13815                                 continue
13816
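        # Inject the system and world sets into the resolver as root nodes so
        # that everything they directly or indirectly require is treated as
        # needed and therefore protected from removal.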
13817         set_args = {}
13818         for s, package_set in required_sets.iteritems():
13819                 set_atom = SETPREFIX + s
13820                 set_arg = SetArg(arg=set_atom, set=package_set,
13821                         root_config=resolver.roots[myroot])
13822                 set_args[s] = set_arg
13823                 for atom in set_arg.set:
13824                         resolver._dep_stack.append(
13825                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13826                         resolver.digraph.add(set_arg, None)
13827
13828         success = resolver._complete_graph()
13829         writemsg_level("\b\b... done!\n")
13830
13831         resolver.display_problems()
13832
13833         if not success:
13834                 return 1
13835
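        # Report any hard dependencies that could not be satisfied; returns True
        # when depclean/prune should abort (missing deps are only tolerated when
        # specific packages were requested on the command line).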
13836         def unresolved_deps():
13837
13838                 unresolvable = set()
13839                 for dep in resolver._initially_unsatisfied_deps:
13840                         if isinstance(dep.parent, Package) and \
13841                                 (dep.priority > UnmergeDepPriority.SOFT):
13842                                 unresolvable.add((dep.atom, dep.parent.cpv))
13843
13844                 if not unresolvable:
13845                         return False
13846
13847                 if unresolvable and not allow_missing_deps:
13848                         prefix = bad(" * ")
13849                         msg = []
13850                         msg.append("Dependencies could not be completely resolved due to")
13851                         msg.append("the following required packages not being installed:")
13852                         msg.append("")
13853                         for atom, parent in unresolvable:
13854                                 msg.append("  %s pulled in by:" % (atom,))
13855                                 msg.append("    %s" % (parent,))
13856                                 msg.append("")
13857                         msg.append("Have you forgotten to run " + \
13858                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13859                         msg.append(("to %s? It may be necessary to manually " + \
13860                                 "uninstall packages that no longer") % action)
13861                         msg.append("exist in the portage tree since " + \
13862                                 "it may not be possible to satisfy their")
13863                         msg.append("dependencies.  Also, be aware of " + \
13864                                 "the --with-bdeps option that is documented")
13865                         msg.append("in " + good("`man emerge`") + ".")
13866                         if action == "prune":
13867                                 msg.append("")
13868                                 msg.append("If you would like to ignore " + \
13869                                         "dependencies then use %s." % good("--nodeps"))
13870                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13871                                 level=logging.ERROR, noiselevel=-1)
13872                         return True
13873                 return False
13874
13875         if unresolved_deps():
13876                 return 1
13877
13878         graph = resolver.digraph.copy()
13879         required_pkgs_total = 0
13880         for node in graph:
13881                 if isinstance(node, Package):
13882                         required_pkgs_total += 1
13883
13884         def show_parents(child_node):
13885                 parent_nodes = graph.parent_nodes(child_node)
13886                 if not parent_nodes:
13887                         # With --prune, the highest version can be pulled in without any
13888                         # real parent since all installed packages are pulled in.  In that
13889                         # case there's nothing to show here.
13890                         return
13891                 parent_strs = []
13892                 for node in parent_nodes:
13893                         parent_strs.append(str(getattr(node, "cpv", node)))
13894                 parent_strs.sort()
13895                 msg = []
13896                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13897                 for parent_str in parent_strs:
13898                         msg.append("    %s\n" % (parent_str,))
13899                 msg.append("\n")
13900                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13901
13902         def cmp_pkg_cpv(pkg1, pkg2):
13903                 """Sort Package instances by cpv."""
13904                 if pkg1.cpv > pkg2.cpv:
13905                         return 1
13906                 elif pkg1.cpv == pkg2.cpv:
13907                         return 0
13908                 else:
13909                         return -1
13910
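        # Select the installed packages that did not make it into the dependency
        # graph (and, for prune, that match the argument atoms) as candidates for
        # removal, showing reverse dependencies with --verbose.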
13911         def create_cleanlist():
13912                 pkgs_to_remove = []
13913
13914                 if action == "depclean":
13915                         if args_set:
13916
13917                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13918                                         arg_atom = None
13919                                         try:
13920                                                 arg_atom = args_set.findAtomForPackage(pkg)
13921                                         except portage.exception.InvalidDependString:
13922                                                 # this error has already been displayed by now
13923                                                 continue
13924
13925                                         if arg_atom:
13926                                                 if pkg not in graph:
13927                                                         pkgs_to_remove.append(pkg)
13928                                                 elif "--verbose" in myopts:
13929                                                         show_parents(pkg)
13930
13931                         else:
13932                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13933                                         if pkg not in graph:
13934                                                 pkgs_to_remove.append(pkg)
13935                                         elif "--verbose" in myopts:
13936                                                 show_parents(pkg)
13937
13938                 elif action == "prune":
13939                         # Prune uses all installed packages instead of world. The world
13940                         # set arg is not a real reverse dependency, so don't display it as such.
13941                         graph.remove(set_args["world"])
13942
13943                         for atom in args_set:
13944                                 for pkg in vardb.match_pkgs(atom):
13945                                         if pkg not in graph:
13946                                                 pkgs_to_remove.append(pkg)
13947                                         elif "--verbose" in myopts:
13948                                                 show_parents(pkg)
13949
13950                 if not pkgs_to_remove:
13951                         writemsg_level(
13952                                 ">>> No packages selected for removal by %s\n" % action)
13953                         if "--verbose" not in myopts:
13954                                 writemsg_level(
13955                                         ">>> To see reverse dependencies, use %s\n" % \
13956                                                 good("--verbose"))
13957                         if action == "prune":
13958                                 writemsg_level(
13959                                         ">>> To ignore dependencies, use %s\n" % \
13960                                                 good("--nodeps"))
13961
13962                 return pkgs_to_remove
13963
13964         cleanlist = create_cleanlist()
13965
13966         if len(cleanlist):
13967                 clean_set = set(cleanlist)
13968
13969                 # Check if any of these packages are the sole providers of libraries
13970                 # with consumers that have not been selected for removal. If so, these
13971                 # packages and any dependencies need to be added to the graph.
13972                 real_vardb = trees[myroot]["vartree"].dbapi
13973                 linkmap = real_vardb.linkmap
13974                 liblist = linkmap.listLibraryObjects()
13975                 consumer_cache = {}
13976                 provider_cache = {}
13977                 soname_cache = {}
13978                 consumer_map = {}
13979
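                # Map each removal candidate to the libraries it provides and the
                # remaining consumers of those libraries, so that packages whose
                # libraries are still needed by installed software can be kept.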
13980                 writemsg_level(">>> Checking for lib consumers...\n")
13981
13982                 for pkg in cleanlist:
13983                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13984                         provided_libs = set()
13985
13986                         for lib in liblist:
13987                                 if pkg_dblink.isowner(lib, myroot):
13988                                         provided_libs.add(lib)
13989
13990                         if not provided_libs:
13991                                 continue
13992
13993                         consumers = {}
13994                         for lib in provided_libs:
13995                                 lib_consumers = consumer_cache.get(lib)
13996                                 if lib_consumers is None:
13997                                         lib_consumers = linkmap.findConsumers(lib)
13998                                         consumer_cache[lib] = lib_consumers
13999                                 if lib_consumers:
14000                                         consumers[lib] = lib_consumers
14001
14002                         if not consumers:
14003                                 continue
14004
14005                         for lib, lib_consumers in consumers.items():
14006                                 for consumer_file in list(lib_consumers):
14007                                         if pkg_dblink.isowner(consumer_file, myroot):
14008                                                 lib_consumers.remove(consumer_file)
14009                                 if not lib_consumers:
14010                                         del consumers[lib]
14011
14012                         if not consumers:
14013                                 continue
14014
14015                         for lib, lib_consumers in consumers.iteritems():
14016
14017                                 soname = soname_cache.get(lib)
14018                                 if soname is None:
14019                                         soname = linkmap.getSoname(lib)
14020                                         soname_cache[lib] = soname
14021
14022                                 consumer_providers = []
14023                                 for lib_consumer in lib_consumers:
14024                                         providers = provider_cache.get(lib_consumer)
14025                                         if providers is None:
14026                                                 providers = linkmap.findProviders(lib_consumer)
14027                                                 provider_cache[lib_consumer] = providers
14028                                         if soname not in providers:
14029                                                 # Why does this happen?
14030                                                 continue
14031                                         consumer_providers.append(
14032                                                 (lib_consumer, providers[soname]))
14033
14034                                 consumers[lib] = consumer_providers
14035
14036                         consumer_map[pkg] = consumers
14037
14038                 if consumer_map:
14039
14040                         search_files = set()
14041                         for consumers in consumer_map.itervalues():
14042                                 for lib, consumer_providers in consumers.iteritems():
14043                                         for lib_consumer, providers in consumer_providers:
14044                                                 search_files.add(lib_consumer)
14045                                                 search_files.update(providers)
14046
14047                         writemsg_level(">>> Assigning files to packages...\n")
14048                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14049
14050                         for pkg, consumers in consumer_map.items():
14051                                 for lib, consumer_providers in consumers.items():
14052                                         lib_consumers = set()
14053
14054                                         for lib_consumer, providers in consumer_providers:
14055                                                 owner_set = file_owners.get(lib_consumer)
14056                                                 provider_dblinks = set()
14057                                                 provider_pkgs = set()
14058
14059                                                 if len(providers) > 1:
14060                                                         for provider in providers:
14061                                                                 provider_set = file_owners.get(provider)
14062                                                                 if provider_set is not None:
14063                                                                         provider_dblinks.update(provider_set)
14064
14065                                                 if len(provider_dblinks) > 1:
14066                                                         for provider_dblink in provider_dblinks:
14067                                                                 pkg_key = ("installed", myroot,
14068                                                                         provider_dblink.mycpv, "nomerge")
14069                                                                 if pkg_key not in clean_set:
14070                                                                         provider_pkgs.add(vardb.get(pkg_key))
14071
14072                                                 if provider_pkgs:
14073                                                         continue
14074
14075                                                 if owner_set is not None:
14076                                                         lib_consumers.update(owner_set)
14077
14078                                         for consumer_dblink in list(lib_consumers):
14079                                                 if ("installed", myroot, consumer_dblink.mycpv,
14080                                                         "nomerge") in clean_set:
14081                                                         lib_consumers.remove(consumer_dblink)
14082                                                         continue
14083
14084                                         if lib_consumers:
14085                                                 consumers[lib] = lib_consumers
14086                                         else:
14087                                                 del consumers[lib]
14088                                 if not consumers:
14089                                         del consumer_map[pkg]
14090
14091                 if consumer_map:
14092                         # TODO: Implement a package set for rebuilding consumer packages.
14093
14094                         msg = "In order to avoid breakage of link level " + \
14095                                 "dependencies, one or more packages will not be removed. " + \
14096                                 "This can be solved by rebuilding " + \
14097                                 "the packages that pulled them in."
14098
14099                         prefix = bad(" * ")
14100                         from textwrap import wrap
14101                         writemsg_level("".join(prefix + "%s\n" % line for \
14102                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14103
14104                         msg = []
14105                         for pkg, consumers in consumer_map.iteritems():
14106                                 unique_consumers = set(chain(*consumers.values()))
14107                                 unique_consumers = sorted(consumer.mycpv \
14108                                         for consumer in unique_consumers)
14109                                 msg.append("")
14110                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14111                                 for consumer in unique_consumers:
14112                                         msg.append("    %s" % (consumer,))
14113                         msg.append("")
14114                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14115                                 level=logging.WARNING, noiselevel=-1)
14116
14117                         # Add lib providers to the graph as children of lib consumers,
14118                         # and also add any dependencies pulled in by the provider.
14119                         writemsg_level(">>> Adding lib providers to graph...\n")
14120
14121                         for pkg, consumers in consumer_map.iteritems():
14122                                 for consumer_dblink in set(chain(*consumers.values())):
14123                                         consumer_pkg = vardb.get(("installed", myroot,
14124                                                 consumer_dblink.mycpv, "nomerge"))
14125                                         if not resolver._add_pkg(pkg,
14126                                                 Dependency(parent=consumer_pkg,
14127                                                 priority=UnmergeDepPriority(runtime=True),
14128                                                 root=pkg.root)):
14129                                                 resolver.display_problems()
14130                                                 return 1
14131
14132                         writemsg_level("\nCalculating dependencies  ")
14133                         success = resolver._complete_graph()
14134                         writemsg_level("\b\b... done!\n")
14135                         resolver.display_problems()
14136                         if not success:
14137                                 return 1
14138                         if unresolved_deps():
14139                                 return 1
14140
14141                         graph = resolver.digraph.copy()
14142                         required_pkgs_total = 0
14143                         for node in graph:
14144                                 if isinstance(node, Package):
14145                                         required_pkgs_total += 1
14146                         cleanlist = create_cleanlist()
14147                         if not cleanlist:
14148                                 return 0
14149                         clean_set = set(cleanlist)
14150
14151                 # Use a topological sort to create an unmerge order such that
14152                 # each package is unmerged before its dependencies. This is
14153                 # necessary to avoid breaking things that may need to run
14154                 # during pkg_prerm or pkg_postrm phases.
14155
14156                 # Create a new graph to account for dependencies between the
14157                 # packages being unmerged.
14158                 graph = digraph()
14159                 del cleanlist[:]
14160
14161                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14162                 runtime = UnmergeDepPriority(runtime=True)
14163                 runtime_post = UnmergeDepPriority(runtime_post=True)
14164                 buildtime = UnmergeDepPriority(buildtime=True)
14165                 priority_map = {
14166                         "RDEPEND": runtime,
14167                         "PDEPEND": runtime_post,
14168                         "DEPEND": buildtime,
14169                 }
14170
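                # Re-evaluate each removal candidate's DEPEND/RDEPEND/PDEPEND against
                # the other candidates to build graph edges, so the unmerge order
                # respects dependencies among the packages being removed.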
14171                 for node in clean_set:
14172                         graph.add(node, None)
14173                         mydeps = []
14174                         node_use = node.metadata["USE"].split()
14175                         for dep_type in dep_keys:
14176                                 depstr = node.metadata[dep_type]
14177                                 if not depstr:
14178                                         continue
14179                                 try:
14180                                         portage.dep._dep_check_strict = False
14181                                         success, atoms = portage.dep_check(depstr, None, settings,
14182                                                 myuse=node_use, trees=resolver._graph_trees,
14183                                                 myroot=myroot)
14184                                 finally:
14185                                         portage.dep._dep_check_strict = True
14186                                 if not success:
14187                                         # Ignore invalid deps of packages that will
14188                                         # be uninstalled anyway.
14189                                         continue
14190
14191                                 priority = priority_map[dep_type]
14192                                 for atom in atoms:
14193                                         if not isinstance(atom, portage.dep.Atom):
14194                                                 # Ignore invalid atoms returned from dep_check().
14195                                                 continue
14196                                         if atom.blocker:
14197                                                 continue
14198                                         matches = vardb.match_pkgs(atom)
14199                                         if not matches:
14200                                                 continue
14201                                         for child_node in matches:
14202                                                 if child_node in clean_set:
14203                                                         graph.add(child_node, node, priority=priority)
14204
14205                 ordered = True
14206                 if len(graph.order) == len(graph.root_nodes()):
14207                         # If there are no dependencies between packages
14208                         # let unmerge() group them by cat/pn.
14209                         ordered = False
14210                         cleanlist = [pkg.cpv for pkg in graph.order]
14211                 else:
14212                         # Order nodes from lowest to highest overall reference count for
14213                         # optimal root node selection.
14214                         node_refcounts = {}
14215                         for node in graph.order:
14216                                 node_refcounts[node] = len(graph.parent_nodes(node))
14217                         def cmp_reference_count(node1, node2):
14218                                 return node_refcounts[node1] - node_refcounts[node2]
14219                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14220         
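                        # Pop root nodes first; when circular dependencies leave no true
                        # roots, progressively ignore lower dep priorities until a node
                        # can be removed.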
14221                         ignore_priority_range = [None]
14222                         ignore_priority_range.extend(
14223                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14224                         while not graph.empty():
14225                                 for ignore_priority in ignore_priority_range:
14226                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14227                                         if nodes:
14228                                                 break
14229                                 if not nodes:
14230                                         raise AssertionError("no root nodes")
14231                                 if ignore_priority is not None:
14232                                         # Some deps have been dropped due to circular dependencies,
14233                                         # so only pop one node in order to minimize the number that
14234                                         # are dropped.
14235                                         del nodes[1:]
14236                                 for node in nodes:
14237                                         graph.remove(node)
14238                                         cleanlist.append(node.cpv)
14239
14240                 unmerge(root_config, myopts, "unmerge", cleanlist,
14241                         ldpath_mtimes, ordered=ordered)
14242
14243         if action == "prune":
14244                 return
14245
14246         if not cleanlist and "--quiet" in myopts:
14247                 return
14248
14249         print "Packages installed:   "+str(len(vardb.cpv_all()))
14250         print "Packages in world:    " + \
14251                 str(len(root_config.sets["world"].getAtoms()))
14252         print "Packages in system:   " + \
14253                 str(len(root_config.sets["system"].getAtoms()))
14254         print "Required packages:    "+str(required_pkgs_total)
14255         if "--pretend" in myopts:
14256                 print "Number to remove:     "+str(len(cleanlist))
14257         else:
14258                 print "Number removed:       "+str(len(cleanlist))
14259
14260 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14261         """
14262         Construct a depgraph for the given resume list. This will raise
14263         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14264         @rtype: tuple
14265         @returns: (success, depgraph, dropped_tasks)
14266         """
14267         skip_masked = True
14268         skip_unsatisfied = True
14269         mergelist = mtimedb["resume"]["mergelist"]
14270         dropped_tasks = set()
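	# Keep rebuilding the depgraph, pruning unsatisfied parent packages from
	# the resume mergelist on each UnsatisfiedResumeDep, until the resume
	# command loads cleanly.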
14271         while True:
14272                 mydepgraph = depgraph(settings, trees,
14273                         myopts, myparams, spinner)
14274                 try:
14275                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14276                                 skip_masked=skip_masked)
14277                 except depgraph.UnsatisfiedResumeDep, e:
14278                         if not skip_unsatisfied:
14279                                 raise
14280
14281                         graph = mydepgraph.digraph
14282                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14283                                 for dep in e.value)
14284                         traversed_nodes = set()
14285                         unsatisfied_stack = list(unsatisfied_parents)
14286                         while unsatisfied_stack:
14287                                 pkg = unsatisfied_stack.pop()
14288                                 if pkg in traversed_nodes:
14289                                         continue
14290                                 traversed_nodes.add(pkg)
14291
14292                                 # If this package was pulled in by a parent
14293                                 # package scheduled for merge, removing this
14294                                 # package may cause the parent package's
14295                                 # dependency to become unsatisfied.
14296                                 for parent_node in graph.parent_nodes(pkg):
14297                                         if not isinstance(parent_node, Package) \
14298                                                 or parent_node.operation not in ("merge", "nomerge"):
14299                                                 continue
14300                                         unsatisfied = \
14301                                                 graph.child_nodes(parent_node,
14302                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14303                                         if pkg in unsatisfied:
14304                                                 unsatisfied_parents[parent_node] = parent_node
14305                                                 unsatisfied_stack.append(parent_node)
14306
14307                         pruned_mergelist = []
14308                         for x in mergelist:
14309                                 if isinstance(x, list) and \
14310                                         tuple(x) not in unsatisfied_parents:
14311                                         pruned_mergelist.append(x)
14312
14313                         # If the mergelist doesn't shrink then this loop is infinite.
14314                         if len(pruned_mergelist) == len(mergelist):
14315                                 # This happens if a package can't be dropped because
14316                                 # it's already installed, but it has unsatisfied PDEPEND.
14317                                 raise
14318                         mergelist[:] = pruned_mergelist
14319
14320                         # Exclude installed packages that have been removed from the graph due
14321                         # to failure to build/install runtime dependencies after the dependent
14322                         # package has already been installed.
14323                         dropped_tasks.update(pkg for pkg in \
14324                                 unsatisfied_parents if pkg.operation != "nomerge")
14325                         mydepgraph.break_refs(unsatisfied_parents)
14326
14327                         del e, graph, traversed_nodes, \
14328                                 unsatisfied_parents, unsatisfied_stack
14329                         continue
14330                 else:
14331                         break
14332         return (success, mydepgraph, dropped_tasks)
14333
14334 def action_build(settings, trees, mtimedb,
14335         myopts, myaction, myfiles, spinner):
14336
14337         # validate the state of the resume data
14338         # so that we can make assumptions later.
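	# Entries with the wrong structure, or whose mergelist references a
	# $ROOT that is no longer configured, are discarded here.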
14339         for k in ("resume", "resume_backup"):
14340                 if k not in mtimedb:
14341                         continue
14342                 resume_data = mtimedb[k]
14343                 if not isinstance(resume_data, dict):
14344                         del mtimedb[k]
14345                         continue
14346                 mergelist = resume_data.get("mergelist")
14347                 if not isinstance(mergelist, list):
14348                         del mtimedb[k]
14349                         continue
14350                 for x in mergelist:
14351                         if not (isinstance(x, list) and len(x) == 4):
14352                                 continue
14353                         pkg_type, pkg_root, pkg_key, pkg_action = x
14354                         if pkg_root not in trees:
14355                                 # Current $ROOT setting differs,
14356                                 # so the list must be stale.
14357                                 mergelist = None
14358                                 break
14359                 if not mergelist:
14360                         del mtimedb[k]
14361                         continue
14362                 resume_opts = resume_data.get("myopts")
14363                 if not isinstance(resume_opts, (dict, list)):
14364                         del mtimedb[k]
14365                         continue
14366                 favorites = resume_data.get("favorites")
14367                 if not isinstance(favorites, list):
14368                         del mtimedb[k]
14369                         continue
14370
14371         resume = False
14372         if "--resume" in myopts and \
14373                 ("resume" in mtimedb or
14374                 "resume_backup" in mtimedb):
14375                 resume = True
14376                 if "resume" not in mtimedb:
14377                         mtimedb["resume"] = mtimedb["resume_backup"]
14378                         del mtimedb["resume_backup"]
14379                         mtimedb.commit()
14380                 # The stored "myopts" may be a list, for backward compatibility.
14381                 resume_opts = mtimedb["resume"].get("myopts", [])
14382                 if isinstance(resume_opts, list):
14383                         resume_opts = dict((k,True) for k in resume_opts)
14384                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14385                         resume_opts.pop(opt, None)
14386                 myopts.update(resume_opts)
14387
14388                 if "--debug" in myopts:
14389                         writemsg_level("myopts %s\n" % (myopts,))
14390
14391                 # Adjust config according to options of the command being resumed.
14392                 for myroot in trees:
14393                         mysettings = trees[myroot]["vartree"].settings
14394                         mysettings.unlock()
14395                         adjust_config(myopts, mysettings)
14396                         mysettings.lock()
14397                         del myroot, mysettings
14398
14399         ldpath_mtimes = mtimedb["ldpath"]
14400         favorites = []
14401         merge_count = 0
14402         buildpkgonly = "--buildpkgonly" in myopts
14403         pretend = "--pretend" in myopts
14404         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14405         ask = "--ask" in myopts
14406         nodeps = "--nodeps" in myopts
14407         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14408         tree = "--tree" in myopts
14409         if nodeps and tree:
14410                 tree = False
14411                 del myopts["--tree"]
14412                 portage.writemsg(colorize("WARN", " * ") + \
14413                         "--tree is broken with --nodeps. Disabling...\n")
14414         debug = "--debug" in myopts
14415         verbose = "--verbose" in myopts
14416         quiet = "--quiet" in myopts
14417         if pretend or fetchonly:
14418                 # make the mtimedb readonly
14419                 mtimedb.filename = None
14420         if '--digest' in myopts or 'digest' in settings.features:
14421                 if '--digest' in myopts:
14422                         msg = "The --digest option"
14423                 else:
14424                         msg = "The FEATURES=digest setting"
14425
14426                 msg += " can prevent corruption from being" + \
14427                         " noticed. The `repoman manifest` command is the preferred" + \
14428                         " way to generate manifests and it is capable of doing an" + \
14429                         " entire repository or category at once."
14430                 prefix = bad(" * ")
14431                 writemsg(prefix + "\n")
14432                 from textwrap import wrap
14433                 for line in wrap(msg, 72):
14434                         writemsg("%s%s\n" % (prefix, line))
14435                 writemsg(prefix + "\n")
14436
14437         if "--quiet" not in myopts and \
14438                 ("--pretend" in myopts or "--ask" in myopts or \
14439                 "--tree" in myopts or "--verbose" in myopts):
14440                 action = ""
14441                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14442                         action = "fetched"
14443                 elif "--buildpkgonly" in myopts:
14444                         action = "built"
14445                 else:
14446                         action = "merged"
14447                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14448                         print
14449                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14450                         print
14451                 else:
14452                         print
14453                         print darkgreen("These are the packages that would be %s, in order:") % action
14454                         print
14455
14456         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14457         if not show_spinner:
14458                 spinner.update = spinner.update_quiet
14459
14460         if resume:
14461                 favorites = mtimedb["resume"].get("favorites")
14462                 if not isinstance(favorites, list):
14463                         favorites = []
14464
14465                 if show_spinner:
14466                         print "Calculating dependencies  ",
14467                 myparams = create_depgraph_params(myopts, myaction)
14468
14469                 resume_data = mtimedb["resume"]
14470                 mergelist = resume_data["mergelist"]
14471                 if mergelist and "--skipfirst" in myopts:
14472                         for i, task in enumerate(mergelist):
14473                                 if isinstance(task, list) and \
14474                                         task and task[-1] == "merge":
14475                                         del mergelist[i]
14476                                         break
14477
14478                 success = False
14479                 mydepgraph = None
14480                 try:
14481                         success, mydepgraph, dropped_tasks = resume_depgraph(
14482                                 settings, trees, mtimedb, myopts, myparams, spinner)
14483                 except (portage.exception.PackageNotFound,
14484                         depgraph.UnsatisfiedResumeDep), e:
14485                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14486                                 mydepgraph = e.depgraph
14487                         if show_spinner:
14488                                 print
14489                         from textwrap import wrap
14490                         from portage.output import EOutput
14491                         out = EOutput()
14492
14493                         resume_data = mtimedb["resume"]
14494                         mergelist = resume_data.get("mergelist")
14495                         if not isinstance(mergelist, list):
14496                                 mergelist = []
14497                         if (mergelist and debug) or (verbose and not quiet):
14498                                 out.eerror("Invalid resume list:")
14499                                 out.eerror("")
14500                                 indent = "  "
14501                                 for task in mergelist:
14502                                         if isinstance(task, list):
14503                                                 out.eerror(indent + str(tuple(task)))
14504                                 out.eerror("")
14505
14506                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14507                                 out.eerror("One or more packages are either masked or " + \
14508                                         "have missing dependencies:")
14509                                 out.eerror("")
14510                                 indent = "  "
14511                                 for dep in e.value:
14512                                         if dep.atom is None:
14513                                                 out.eerror(indent + "Masked package:")
14514                                                 out.eerror(2 * indent + str(dep.parent))
14515                                                 out.eerror("")
14516                                         else:
14517                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14518                                                 out.eerror(2 * indent + str(dep.parent))
14519                                                 out.eerror("")
14520                                 msg = "The resume list contains packages " + \
14521                                         "that are either masked or have " + \
14522                                         "unsatisfied dependencies. " + \
14523                                         "Please restart/continue " + \
14524                                         "the operation manually, or use --skipfirst " + \
14525                                         "to skip the first package in the list and " + \
14526                                         "any other packages that may be " + \
14527                                         "masked or have missing dependencies."
14528                                 for line in wrap(msg, 72):
14529                                         out.eerror(line)
14530                         elif isinstance(e, portage.exception.PackageNotFound):
14531                                 out.eerror("An expected package is " + \
14532                                         "not available: %s" % str(e))
14533                                 out.eerror("")
14534                                 msg = "The resume list contains one or more " + \
14535                                         "packages that are no longer " + \
14536                                         "available. Please restart/continue " + \
14537                                         "the operation manually."
14538                                 for line in wrap(msg, 72):
14539                                         out.eerror(line)
14540                 else:
14541                         if show_spinner:
14542                                 print "\b\b... done!"
14543
14544                 if success:
14545                         if dropped_tasks:
14546                                 portage.writemsg("!!! One or more packages have been " + \
14547                                         "dropped due to\n" + \
14548                                         "!!! masking or unsatisfied dependencies:\n\n",
14549                                         noiselevel=-1)
14550                                 for task in dropped_tasks:
14551                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14552                                 portage.writemsg("\n", noiselevel=-1)
14553                         del dropped_tasks
14554                 else:
14555                         if mydepgraph is not None:
14556                                 mydepgraph.display_problems()
14557                         if not (ask or pretend):
14558                                 # delete the current list and also the backup
14559                                 # since it's probably stale too.
14560                                 for k in ("resume", "resume_backup"):
14561                                         mtimedb.pop(k, None)
14562                                 mtimedb.commit()
14563
14564                         return 1
14565         else:
14566                 if ("--resume" in myopts):
14567                         print darkgreen("emerge: It seems we have nothing to resume...")
14568                         return os.EX_OK
14569
14570                 myparams = create_depgraph_params(myopts, myaction)
14571                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14572                         print "Calculating dependencies  ",
14573                         sys.stdout.flush()
14574                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14575                 try:
14576                         retval, favorites = mydepgraph.select_files(myfiles)
14577                 except portage.exception.PackageNotFound, e:
14578                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14579                         return 1
14580                 except portage.exception.PackageSetNotFound, e:
14581                         root_config = trees[settings["ROOT"]]["root_config"]
14582                         display_missing_pkg_set(root_config, e.value)
14583                         return 1
14584                 if show_spinner:
14585                         print "\b\b... done!"
14586                 if not retval:
14587                         mydepgraph.display_problems()
14588                         return 1
14589
14590         if "--pretend" not in myopts and \
14591                 ("--ask" in myopts or "--tree" in myopts or \
14592                 "--verbose" in myopts) and \
14593                 not ("--quiet" in myopts and "--ask" not in myopts):
14594                 if "--resume" in myopts:
14595                         mymergelist = mydepgraph.altlist()
14596                         if len(mymergelist) == 0:
14597                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14598                                 return os.EX_OK
14599                         favorites = mtimedb["resume"]["favorites"]
14600                         retval = mydepgraph.display(
14601                                 mydepgraph.altlist(reversed=tree),
14602                                 favorites=favorites)
14603                         mydepgraph.display_problems()
14604                         if retval != os.EX_OK:
14605                                 return retval
14606                         prompt="Would you like to resume merging these packages?"
14607                 else:
14608                         retval = mydepgraph.display(
14609                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14610                                 favorites=favorites)
14611                         mydepgraph.display_problems()
14612                         if retval != os.EX_OK:
14613                                 return retval
14614                         mergecount=0
14615                         for x in mydepgraph.altlist():
14616                                 if isinstance(x, Package) and x.operation == "merge":
14617                                         mergecount += 1
14618
14619                         if mergecount==0:
14620                                 sets = trees[settings["ROOT"]]["root_config"].sets
14621                                 world_candidates = None
14622                                 if "--noreplace" in myopts and \
14623                                         not oneshot and favorites:
14624                                         # Sets that are not world candidates are filtered
14625                                         # out here since the favorites list needs to be
14626                                         # complete for depgraph.loadResumeCommand() to
14627                                         # operate correctly.
14628                                         world_candidates = [x for x in favorites \
14629                                                 if not (x.startswith(SETPREFIX) and \
14630                                                 not sets[x[1:]].world_candidate)]
14631                                 if "--noreplace" in myopts and \
14632                                         not oneshot and world_candidates:
14633                                         print
14634                                         for x in world_candidates:
14635                                                 print " %s %s" % (good("*"), x)
14636                                         prompt="Would you like to add these packages to your world favorites?"
14637                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14638                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14639                                 else:
14640                                         print
14641                                         print "Nothing to merge; quitting."
14642                                         print
14643                                         return os.EX_OK
14644                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14645                                 prompt="Would you like to fetch the source files for these packages?"
14646                         else:
14647                                 prompt="Would you like to merge these packages?"
14648                 print
14649                 if "--ask" in myopts and userquery(prompt) == "No":
14650                         print
14651                         print "Quitting."
14652                         print
14653                         return os.EX_OK
14654                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14655                 myopts.pop("--ask", None)
14656
14657         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14658                 if ("--resume" in myopts):
14659                         mymergelist = mydepgraph.altlist()
14660                         if len(mymergelist) == 0:
14661                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14662                                 return os.EX_OK
14663                         favorites = mtimedb["resume"]["favorites"]
14664                         retval = mydepgraph.display(
14665                                 mydepgraph.altlist(reversed=tree),
14666                                 favorites=favorites)
14667                         mydepgraph.display_problems()
14668                         if retval != os.EX_OK:
14669                                 return retval
14670                 else:
14671                         retval = mydepgraph.display(
14672                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14673                                 favorites=favorites)
14674                         mydepgraph.display_problems()
14675                         if retval != os.EX_OK:
14676                                 return retval
14677                         if "--buildpkgonly" in myopts:
14678                                 graph_copy = mydepgraph.digraph.clone()
14679                                 removed_nodes = set()
14680                                 for node in graph_copy:
14681                                         if not isinstance(node, Package) or \
14682                                                 node.operation == "nomerge":
14683                                                 removed_nodes.add(node)
14684                                 graph_copy.difference_update(removed_nodes)
14685                                 if not graph_copy.hasallzeros(ignore_priority = \
14686                                         DepPrioritySatisfiedRange.ignore_medium):
14687                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14688                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14689                                         return 1
14690         else:
14691                 if "--buildpkgonly" in myopts:
14692                         graph_copy = mydepgraph.digraph.clone()
14693                         removed_nodes = set()
14694                         for node in graph_copy:
14695                                 if not isinstance(node, Package) or \
14696                                         node.operation == "nomerge":
14697                                         removed_nodes.add(node)
14698                         graph_copy.difference_update(removed_nodes)
14699                         if not graph_copy.hasallzeros(ignore_priority = \
14700                                 DepPrioritySatisfiedRange.ignore_medium):
14701                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14702                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14703                                 return 1
14704
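		# In both branches below, the depgraph's merge list is handed to the
		# Scheduler; break_refs() and clear_caches() then release the graph and
		# cached metadata, presumably so memory can be reclaimed before merging.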
14705                 if ("--resume" in myopts):
14706                         favorites=mtimedb["resume"]["favorites"]
14707                         mymergelist = mydepgraph.altlist()
14708                         mydepgraph.break_refs(mymergelist)
14709                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14710                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14711                         del mydepgraph, mymergelist
14712                         clear_caches(trees)
14713
14714                         retval = mergetask.merge()
14715                         merge_count = mergetask.curval
14716                 else:
14717                         if "resume" in mtimedb and \
14718                         "mergelist" in mtimedb["resume"] and \
14719                         len(mtimedb["resume"]["mergelist"]) > 1:
14720                                 mtimedb["resume_backup"] = mtimedb["resume"]
14721                                 del mtimedb["resume"]
14722                                 mtimedb.commit()
14723                         mtimedb["resume"]={}
14724                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14725                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14726                         # a list type for options.
14727                         mtimedb["resume"]["myopts"] = myopts.copy()
14728
14729                         # Convert Atom instances to plain str.
14730                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14731
14732                         pkglist = mydepgraph.altlist()
14733                         mydepgraph.saveNomergeFavorites()
14734                         mydepgraph.break_refs(pkglist)
14735                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14736                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14737                         del mydepgraph, pkglist
14738                         clear_caches(trees)
14739
14740                         retval = mergetask.merge()
14741                         merge_count = mergetask.curval
14742
14743                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14744                         if "yes" == settings.get("AUTOCLEAN"):
14745                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14746                                 unmerge(trees[settings["ROOT"]]["root_config"],
14747                                         myopts, "clean", [],
14748                                         ldpath_mtimes, autoclean=1)
14749                         else:
14750                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14751                                         + " AUTOCLEAN is disabled.  This can cause serious"
14752                                         + " problems due to overlapping packages.\n")
14753                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14754
14755                 return retval
14756
14757 def multiple_actions(action1, action2):
14758         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14759         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14760         sys.exit(1)
14761
14762 def insert_optional_args(args):
14763         """
14764         Parse optional arguments and insert a value if one has
14765         not been provided. This is done before feeding the args
14766         to the optparse parser since that parser does not support
14767         this feature natively.
14768         """
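	# For example, ["-j"] becomes ["--jobs", "True"] (unlimited jobs) and
	# ["-j4"] becomes ["--jobs", "4"], so optparse always sees a value.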
14769
14770         new_args = []
14771         jobs_opts = ("-j", "--jobs")
14772         arg_stack = args[:]
14773         arg_stack.reverse()
14774         while arg_stack:
14775                 arg = arg_stack.pop()
14776
14777                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14778                 if not (short_job_opt or arg in jobs_opts):
14779                         new_args.append(arg)
14780                         continue
14781
14782                 # optparse requires an explicit value for --jobs, so normalize the
14783                 # option here and insert a placeholder value when none is given.
14784
14785                 new_args.append("--jobs")
14786                 job_count = None
14787                 saved_opts = None
14788                 if short_job_opt and len(arg) > 2:
14789                         if arg[:2] == "-j":
14790                                 try:
14791                                         job_count = int(arg[2:])
14792                                 except ValueError:
14793                                         saved_opts = arg[2:]
14794                         else:
14795                                 job_count = "True"
14796                                 saved_opts = arg[1:].replace("j", "")
14797
14798                 if job_count is None and arg_stack:
14799                         try:
14800                                 job_count = int(arg_stack[-1])
14801                         except ValueError:
14802                                 pass
14803                         else:
14804                                 # Discard the job count from the stack
14805                                 # since we're consuming it here.
14806                                 arg_stack.pop()
14807
14808                 if job_count is None:
14809                         # unlimited number of jobs
14810                         new_args.append("True")
14811                 else:
14812                         new_args.append(str(job_count))
14813
14814                 if saved_opts is not None:
14815                         new_args.append("-" + saved_opts)
14816
14817         return new_args
14818
14819 def parse_opts(tmpcmdline, silent=False):
14820         myaction=None
14821         myopts = {}
14822         myfiles=[]
14823
14824         global actions, options, shortmapping
14825
14826         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14827         argument_options = {
14828                 "--config-root": {
14829                         "help":"specify the location for portage configuration files",
14830                         "action":"store"
14831                 },
14832                 "--color": {
14833                         "help":"enable or disable color output",
14834                         "type":"choice",
14835                         "choices":("y", "n")
14836                 },
14837
14838                 "--jobs": {
14839
14840                         "help"   : "Specifies the number of packages to build " + \
14841                                 "simultaneously.",
14842
14843                         "action" : "store"
14844                 },
14845
14846                 "--load-average": {
14847
14848                         "help"   :"Specifies that no new builds should be started " + \
14849                                 "if there are other builds running and the load average " + \
14850                                 "is at least LOAD (a floating-point number).",
14851
14852                         "action" : "store"
14853                 },
14854
14855                 "--with-bdeps": {
14856                         "help":"include unnecessary build time dependencies",
14857                         "type":"choice",
14858                         "choices":("y", "n")
14859                 },
14860                 "--reinstall": {
14861                         "help":"specify conditions to trigger package reinstallation",
14862                         "type":"choice",
14863                         "choices":["changed-use"]
14864                 },
14865                 "--root": {
14866                  "help"   : "specify the target root filesystem for merging packages",
14867                  "action" : "store"
14868                 },
14869         }
14870
14871         from optparse import OptionParser
14872         parser = OptionParser()
14873         if parser.has_option("--help"):
14874                 parser.remove_option("--help")
14875
14876         for action_opt in actions:
14877                 parser.add_option("--" + action_opt, action="store_true",
14878                         dest=action_opt.replace("-", "_"), default=False)
14879         for myopt in options:
14880                 parser.add_option(myopt, action="store_true",
14881                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14882         for shortopt, longopt in shortmapping.iteritems():
14883                 parser.add_option("-" + shortopt, action="store_true",
14884                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14885         for myalias, myopt in longopt_aliases.iteritems():
14886                 parser.add_option(myalias, action="store_true",
14887                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14888
14889         for myopt, kwargs in argument_options.iteritems():
14890                 parser.add_option(myopt,
14891                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14892
14893         tmpcmdline = insert_optional_args(tmpcmdline)
14894
14895         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14896
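	# Normalize --jobs: the literal string "True" (as inserted by
	# insert_optional_args when no count is given) means an unlimited number
	# of jobs; any other value must be a positive integer or it is discarded
	# with a warning.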
14897         if myoptions.jobs:
14898                 jobs = None
14899                 if myoptions.jobs == "True":
14900                         jobs = True
14901                 else:
14902                         try:
14903                                 jobs = int(myoptions.jobs)
14904                         except ValueError:
14905                                 jobs = -1
14906
14907                 if jobs is not True and \
14908                         jobs < 1:
14909                         jobs = None
14910                         if not silent:
14911                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14912                                         (myoptions.jobs,), noiselevel=-1)
14913
14914                 myoptions.jobs = jobs
14915
14916         if myoptions.load_average:
14917                 try:
14918                         load_average = float(myoptions.load_average)
14919                 except ValueError:
14920                         load_average = 0.0
14921
14922                 if load_average <= 0.0:
14923                         load_average = None
14924                         if not silent:
14925                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14926                                         (myoptions.load_average,), noiselevel=-1)
14927
14928                 myoptions.load_average = load_average
14929
14930         for myopt in options:
14931                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14932                 if v:
14933                         myopts[myopt] = True
14934
14935         for myopt in argument_options:
14936                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14937                 if v is not None:
14938                         myopts[myopt] = v
14939
14940         if myoptions.searchdesc:
14941                 myoptions.search = True
14942
14943         for action_opt in actions:
14944                 v = getattr(myoptions, action_opt.replace("-", "_"))
14945                 if v:
14946                         if myaction:
14947                                 multiple_actions(myaction, action_opt)
14948                                 sys.exit(1)
14949                         myaction = action_opt
14950
14951         myfiles += myargs
14952
14953         return myaction, myopts, myfiles
14954
14955 def validate_ebuild_environment(trees):
14956         for myroot in trees:
14957                 settings = trees[myroot]["vartree"].settings
14958                 settings.validate()
14959
14960 def clear_caches(trees):
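	# Drop memoized dbapi and link-map caches for every configured tree and
	# force a garbage-collection pass.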
14961         for d in trees.itervalues():
14962                 d["porttree"].dbapi.melt()
14963                 d["porttree"].dbapi._aux_cache.clear()
14964                 d["bintree"].dbapi._aux_cache.clear()
14965                 d["bintree"].dbapi._clear_cache()
14966                 d["vartree"].dbapi.linkmap._clear_cache()
14967         portage.dircache.clear()
14968         gc.collect()
14969
14970 def load_emerge_config(trees=None):
14971         kwargs = {}
14972         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14973                 v = os.environ.get(envvar, None)
14974                 if v and v.strip():
14975                         kwargs[k] = v
14976         trees = portage.create_trees(trees=trees, **kwargs)
14977
14978         for root, root_trees in trees.iteritems():
14979                 settings = root_trees["vartree"].settings
14980                 setconfig = load_default_config(settings, root_trees)
14981                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14982
14983         settings = trees["/"]["vartree"].settings
14984
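	# When a non-"/" $ROOT is configured, prefer its settings over those of "/".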
14985         for myroot in trees:
14986                 if myroot != "/":
14987                         settings = trees[myroot]["vartree"].settings
14988                         break
14989
14990         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14991         mtimedb = portage.MtimeDB(mtimedbfile)
14992         
14993         return settings, trees, mtimedb
14994
14995 def adjust_config(myopts, settings):
14996         """Make emerge specific adjustments to the config."""
14997
14998         # To enhance usability, make some vars case insensitive by forcing them to
14999         # lower case.
15000         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15001                 if myvar in settings:
15002                         settings[myvar] = settings[myvar].lower()
15003                         settings.backup_changes(myvar)
15004         del myvar
15005
15006         # Kill noauto as it will break merges otherwise.
15007         if "noauto" in settings.features:
15008                 settings.features.remove('noauto')
15009                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15010                 settings.backup_changes("FEATURES")
15011
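	# Sanitize the integer-valued settings below, keeping the defaults when
	# the configured values cannot be parsed.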
15012         CLEAN_DELAY = 5
15013         try:
15014                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15015         except ValueError, e:
15016                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15017                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15018                         settings["CLEAN_DELAY"], noiselevel=-1)
15019         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15020         settings.backup_changes("CLEAN_DELAY")
15021
15022         EMERGE_WARNING_DELAY = 10
15023         try:
15024                 EMERGE_WARNING_DELAY = int(settings.get(
15025                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15026         except ValueError, e:
15027                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15028                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15029                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15030         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15031         settings.backup_changes("EMERGE_WARNING_DELAY")
15032
15033         if "--quiet" in myopts:
15034                 settings["PORTAGE_QUIET"]="1"
15035                 settings.backup_changes("PORTAGE_QUIET")
15036
15037         if "--verbose" in myopts:
15038                 settings["PORTAGE_VERBOSE"] = "1"
15039                 settings.backup_changes("PORTAGE_VERBOSE")
15040
15041         # Set so that configs will be merged regardless of remembered status
15042         if ("--noconfmem" in myopts):
15043                 settings["NOCONFMEM"]="1"
15044                 settings.backup_changes("NOCONFMEM")
15045
15046         # Set various debug markers... They should be merged somehow.
15047         PORTAGE_DEBUG = 0
15048         try:
15049                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15050                 if PORTAGE_DEBUG not in (0, 1):
15051                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15052                                 PORTAGE_DEBUG, noiselevel=-1)
15053                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15054                                 noiselevel=-1)
15055                         PORTAGE_DEBUG = 0
15056         except ValueError, e:
15057                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15058                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15059                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15060                 del e
15061         if "--debug" in myopts:
15062                 PORTAGE_DEBUG = 1
15063         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15064         settings.backup_changes("PORTAGE_DEBUG")
15065
15066         if settings.get("NOCOLOR") not in ("yes","true"):
15067                 portage.output.havecolor = 1
15068
15069         # The explicit --color < y | n > option overrides the NOCOLOR environment
15070         # variable and stdout auto-detection.
15071         if "--color" in myopts:
15072                 if "y" == myopts["--color"]:
15073                         portage.output.havecolor = 1
15074                         settings["NOCOLOR"] = "false"
15075                 else:
15076                         portage.output.havecolor = 0
15077                         settings["NOCOLOR"] = "true"
15078                 settings.backup_changes("NOCOLOR")
15079         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15080                 portage.output.havecolor = 0
15081                 settings["NOCOLOR"] = "true"
15082                 settings.backup_changes("NOCOLOR")
15083
15084 def apply_priorities(settings):
15085         ionice(settings)
15086         nice(settings)
15087
15088 def nice(settings):
15089         try:
15090                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15091         except (OSError, ValueError), e:
15092                 out = portage.output.EOutput()
15093                 out.eerror("Failed to change nice value to '%s'" % \
15094                         settings["PORTAGE_NICENESS"])
15095                 out.eerror("%s\n" % str(e))
15096
15097 def ionice(settings):
15098
15099         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15100         if ionice_cmd:
15101                 ionice_cmd = shlex.split(ionice_cmd)
15102         if not ionice_cmd:
15103                 return
15104
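	# Substitute ${PID} in PORTAGE_IONICE_COMMAND with the current process
	# id before spawning the command.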
15105         from portage.util import varexpand
15106         variables = {"PID" : str(os.getpid())}
15107         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15108
15109         try:
15110                 rval = portage.process.spawn(cmd, env=os.environ)
15111         except portage.exception.CommandNotFound:
15112                 # The ionice command is not available (the system probably
15113                 # doesn't support it), so return silently.
15114                 return
15115
15116         if rval != os.EX_OK:
15117                 out = portage.output.EOutput()
15118                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15119                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15120
15121 def display_missing_pkg_set(root_config, set_name):
15122
15123         msg = []
15124         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15125                 "The following sets exist:") % \
15126                 colorize("INFORM", set_name))
15127         msg.append("")
15128
15129         for s in sorted(root_config.sets):
15130                 msg.append("    %s" % s)
15131         msg.append("")
15132
15133         writemsg_level("".join("%s\n" % l for l in msg),
15134                 level=logging.ERROR, noiselevel=-1)
15135
15136 def expand_set_arguments(myfiles, myaction, root_config):
15137         retval = os.EX_OK
15138         setconfig = root_config.setconfig
15139
15140         sets = setconfig.getSets()
15141
15142         # In order to know exactly which atoms/sets should be added to the
15143         # world file, the depgraph performs set expansion later. It will get
15144         # confused about where the atoms came from if it's not allowed to
15145         # expand them itself.
15146         do_not_expand = (None, )
15147         newargs = []
15148         for a in myfiles:
15149                 if a in ("system", "world"):
15150                         newargs.append(SETPREFIX+a)
15151                 else:
15152                         newargs.append(a)
15153         myfiles = newargs
15154         del newargs
15155         newargs = []
15156
15157         # separators for set arguments
15158         ARG_START = "{"
15159         ARG_END = "}"
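	# e.g. an argument of the form SETPREFIX + "foo{bar=baz,qux}" passes
	# {"bar": "baz", "qux": "True"} to setconfig.update() for the set "foo"
	# (the set name here is purely illustrative).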
15160
15161         # WARNING: all operators must be of equal length
15162         IS_OPERATOR = "/@"
15163         DIFF_OPERATOR = "-@"
15164         UNION_OPERATOR = "+@"
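	# e.g. SETPREFIX + "world-@system" expands to the atoms of "world" that
	# are not in "system"; "/@" computes an intersection and "+@" a union.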
15165         
15166         for i in range(0, len(myfiles)):
15167                 if myfiles[i].startswith(SETPREFIX):
15168                         start = 0
15169                         end = 0
15170                         x = myfiles[i][len(SETPREFIX):]
15171                         newset = ""
15172                         while x:
15173                                 start = x.find(ARG_START)
15174                                 end = x.find(ARG_END)
15175                                 if start > 0 and start < end:
15176                                         namepart = x[:start]
15177                                         argpart = x[start+1:end]
15178                                 
15179                                         # TODO: implement proper quoting
15180                                         args = argpart.split(",")
15181                                         options = {}
15182                                         for a in args:
15183                                                 if "=" in a:
15184                                                         k, v  = a.split("=", 1)
15185                                                         options[k] = v
15186                                                 else:
15187                                                         options[a] = "True"
15188                                         setconfig.update(namepart, options)
15189                                         newset += (x[:start-len(namepart)]+namepart)
15190                                         x = x[end+len(ARG_END):]
15191                                 else:
15192                                         newset += x
15193                                         x = ""
15194                         myfiles[i] = SETPREFIX+newset
15195                                 
15196         sets = setconfig.getSets()
15197
15198         # display errors that occurred while loading the SetConfig instance
15199         for e in setconfig.errors:
15200                 print colorize("BAD", "Error during set creation: %s" % e)
15201         
15202         # emerge relies on the existence of sets with names "world" and "system"
15203         required_sets = ("world", "system")
15204         missing_sets = []
15205
15206         for s in required_sets:
15207                 if s not in sets:
15208                         missing_sets.append(s)
15209         if missing_sets:
15210                 if len(missing_sets) > 2:
15211                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15212                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15213                 elif len(missing_sets) == 2:
15214                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15215                 else:
15216                         missing_sets_str = '"%s"' % missing_sets[-1]
15217                 msg = ["emerge: incomplete set configuration, " + \
15218                         "missing set(s): %s" % missing_sets_str]
15219                 if sets:
15220                         msg.append("        sets defined: %s" % ", ".join(sets))
15221                 msg.append("        This usually means that '%s'" % \
15222                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15223                 msg.append("        is missing or corrupt.")
15224                 for line in msg:
15225                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15226                 return (None, 1)
15227         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15228
15229         for a in myfiles:
15230                 if a.startswith(SETPREFIX):
15231                         # support simple set operations (intersection, difference and union)
15232                         # on the commandline. Expressions are evaluated strictly left-to-right
15233                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15234                                 expression = a[len(SETPREFIX):]
15235                                 expr_sets = []
15236                                 expr_ops = []
15237                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15238                                         is_pos = expression.rfind(IS_OPERATOR)
15239                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15240                                         union_pos = expression.rfind(UNION_OPERATOR)
15241                                         op_pos = max(is_pos, diff_pos, union_pos)
15242                                         s1 = expression[:op_pos]
15243                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15244                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15245                                         if s2 not in sets:
15246                                                 display_missing_pkg_set(root_config, s2)
15247                                                 return (None, 1)
15248                                         expr_sets.insert(0, s2)
15249                                         expr_ops.insert(0, op)
15250                                         expression = s1
15251                                 if expression not in sets:
15252                                         display_missing_pkg_set(root_config, expression)
15253                                         return (None, 1)
15254                                 expr_sets.insert(0, expression)
15255                                 result = set(setconfig.getSetAtoms(expression))
15256                                 for i in range(0, len(expr_ops)):
15257                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15258                                         if expr_ops[i] == IS_OPERATOR:
15259                                                 result.intersection_update(s2)
15260                                         elif expr_ops[i] == DIFF_OPERATOR:
15261                                                 result.difference_update(s2)
15262                                         elif expr_ops[i] == UNION_OPERATOR:
15263                                                 result.update(s2)
15264                                         else:
15265                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15266                                 newargs.extend(result)
15267                         else:                   
15268                                 s = a[len(SETPREFIX):]
15269                                 if s not in sets:
15270                                         display_missing_pkg_set(root_config, s)
15271                                         return (None, 1)
15272                                 setconfig.active.append(s)
15273                                 try:
15274                                         set_atoms = setconfig.getSetAtoms(s)
15275                                 except portage.exception.PackageSetNotFound, e:
15276                                         writemsg_level(("emerge: the given set '%s' " + \
15277                                                 "contains a non-existent set named '%s'.\n") % \
15278                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15279                                         return (None, 1)
15280                                 if myaction in unmerge_actions and \
15281                                                 not sets[s].supportsOperation("unmerge"):
15282                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15283                                                 "not support unmerge operations\n")
15284                                         retval = 1
15285                                 elif not set_atoms:
15286                                         print "emerge: '%s' is an empty set" % s
15287                                 elif myaction not in do_not_expand:
15288                                         newargs.extend(set_atoms)
15289                                 else:
15290                                         newargs.append(SETPREFIX+s)
15291                                 for e in sets[s].errors:
15292                                         print e
15293                 else:
15294                         newargs.append(a)
15295         return (newargs, retval)
15296
15297 def repo_name_check(trees):
15298         missing_repo_names = set()
15299         for root, root_trees in trees.iteritems():
15300                 if "porttree" in root_trees:
15301                         portdb = root_trees["porttree"].dbapi
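                              # Start by assuming every configured tree lacks a repo_name entry,
                              # then discard each tree for which portdb can resolve a repository path.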
15302                         missing_repo_names.update(portdb.porttrees)
15303                         repos = portdb.getRepositories()
15304                         for r in repos:
15305                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15306                         if portdb.porttree_root in missing_repo_names and \
15307                                 not os.path.exists(os.path.join(
15308                                 portdb.porttree_root, "profiles")):
15309                                 # This is normal if $PORTDIR happens to be empty,
15310                                 # so don't warn about it.
15311                                 missing_repo_names.remove(portdb.porttree_root)
15312
15313         if missing_repo_names:
15314                 msg = []
15315                 msg.append("WARNING: One or more repositories " + \
15316                         "have missing repo_name entries:")
15317                 msg.append("")
15318                 for p in missing_repo_names:
15319                         msg.append("\t%s/profiles/repo_name" % (p,))
15320                 msg.append("")
15321                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15322                         "should be a plain text file containing a unique " + \
15323                         "name for the repository on the first line.", 70))
15324                 writemsg_level("".join("%s\n" % l for l in msg),
15325                         level=logging.WARNING, noiselevel=-1)
15326
15327         return bool(missing_repo_names)
15328
15329 def config_protect_check(trees):
15330         for root, root_trees in trees.iteritems():
15331                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15332                         msg = "!!! CONFIG_PROTECT is empty"
15333                         if root != "/":
15334                                 msg += " for '%s'" % root
15335                         writemsg_level(msg + "\n", level=logging.WARNING, noiselevel=-1)
15336
15337 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15338
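              # With --quiet, just list the matching category/package names; otherwise
              # reuse the search class to print a full description of each candidate.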
15339         if "--quiet" in myopts:
15340                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15341                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15342                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15343                         print "    " + colorize("INFORM", cp)
15344                 return
15345
15346         s = search(root_config, spinner, "--searchdesc" in myopts,
15347                 "--quiet" not in myopts, "--usepkg" in myopts,
15348                 "--usepkgonly" in myopts)
15349         null_cp = portage.dep_getkey(insert_category_into_atom(
15350                 arg, "null"))
15351         cat, atom_pn = portage.catsplit(null_cp)
15352         s.searchkey = atom_pn
15353         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15354                 s.addCP(cp)
15355         s.output()
15356         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15357         print "!!! one of the above fully-qualified ebuild names instead.\n"
15358
15359 def profile_check(trees, myaction, myopts):
15360         if myaction in ("info", "sync"):
15361                 return os.EX_OK
15362         elif "--version" in myopts or "--help" in myopts:
15363                 return os.EX_OK
15364         for root, root_trees in trees.iteritems():
15365                 if root_trees["root_config"].settings.profiles:
15366                         continue
15367                 # generate some profile related warning messages
15368                 validate_ebuild_environment(trees)
15369                 msg = "If you have just changed your profile configuration, you " + \
15370                         "should revert to the previous configuration. Due to " + \
15371                         "your current profile being invalid, allowed actions are " + \
15372                         "limited to --help, --info, --sync, and --version."
15373                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15374                         level=logging.ERROR, noiselevel=-1)
15375                 return 1
15376         return os.EX_OK
15377
15378 def emerge_main():
15379         global portage  # NFC why this is necessary now - genone
15380         portage._disable_legacy_globals()
15381         # Disable color until we're sure that it should be enabled (after
15382         # EMERGE_DEFAULT_OPTS has been parsed).
15383         portage.output.havecolor = 0
15384         # This first pass is just for options that need to be known as early as
15385         # possible, such as --config-root.  They will be parsed again later,
15386         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15387         # value of --config-root).
15388         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15389         if "--debug" in myopts:
15390                 os.environ["PORTAGE_DEBUG"] = "1"
15391         if "--config-root" in myopts:
15392                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15393         if "--root" in myopts:
15394                 os.environ["ROOT"] = myopts["--root"]
15395
15396         # Portage needs to ensure a sane umask for the files it creates.
15397         os.umask(022)
15398         settings, trees, mtimedb = load_emerge_config()
15399         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15400         rval = profile_check(trees, myaction, myopts)
15401         if rval != os.EX_OK:
15402                 return rval
15403
15404         if portage._global_updates(trees, mtimedb["updates"]):
15405                 mtimedb.commit()
15406                 # Reload the whole config from scratch.
15407                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15408                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15409
15410         xterm_titles = "notitles" not in settings.features
15411
15412         tmpcmdline = []
15413         if "--ignore-default-opts" not in myopts:
15414                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15415         tmpcmdline.extend(sys.argv[1:])
15416         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15417
15418         if "--digest" in myopts:
15419                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15420                 # Reload the whole config from scratch so that the portdbapi internal
15421                 # config is updated with new FEATURES.
15422                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15423                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15424
15425         for myroot in trees:
15426                 mysettings = trees[myroot]["vartree"].settings
15427                 mysettings.unlock()
15428                 adjust_config(myopts, mysettings)
15429                 if '--pretend' not in myopts and myaction in \
15430                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15431                         mysettings["PORTAGE_COUNTER_HASH"] = \
15432                                 trees[myroot]["vartree"].dbapi._counter_hash()
15433                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15434                 mysettings.lock()
15435                 del myroot, mysettings
15436
15437         apply_priorities(settings)
15438
15439         spinner = stdout_spinner()
15440         if "candy" in settings.features:
15441                 spinner.update = spinner.update_scroll
15442
15443         if "--quiet" not in myopts:
15444                 portage.deprecated_profile_check(settings=settings)
15445                 repo_name_check(trees)
15446                 config_protect_check(trees)
15447
15448         eclasses_overridden = {}
15449         for mytrees in trees.itervalues():
15450                 mydb = mytrees["porttree"].dbapi
15451                 # Freeze the portdbapi for performance (memoize all xmatch results).
15452                 mydb.freeze()
15453                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15454         del mytrees, mydb
15455
15456         if eclasses_overridden and \
15457                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15458                 prefix = bad(" * ")
15459                 if len(eclasses_overridden) == 1:
15460                         writemsg(prefix + "Overlay eclass overrides " + \
15461                                 "eclass from PORTDIR:\n", noiselevel=-1)
15462                 else:
15463                         writemsg(prefix + "Overlay eclasses override " + \
15464                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15465                 writemsg(prefix + "\n", noiselevel=-1)
15466                 for eclass_name in sorted(eclasses_overridden):
15467                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15468                                 (eclasses_overridden[eclass_name], eclass_name),
15469                                 noiselevel=-1)
15470                 writemsg(prefix + "\n", noiselevel=-1)
15471                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15472                 "because it will trigger invalidation of cached ebuild metadata " + \
15473                 "that is distributed with the portage tree. If you must " + \
15474                 "override eclasses from PORTDIR then you are advised to add " + \
15475                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15476                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15477                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15478                 "you would like to disable this warning."
15479                 from textwrap import wrap
15480                 for line in wrap(msg, 72):
15481                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15482
15483         if "moo" in myfiles:
15484                 print """
15485
15486   Larry loves Gentoo (""" + platform.system() + """)
15487
15488  _______________________
15489 < Have you mooed today? >
15490  -----------------------
15491         \   ^__^
15492          \  (oo)\_______
15493             (__)\       )\/\ 
15494                 ||----w |
15495                 ||     ||
15496
15497 """
15498
15499         for x in myfiles:
15500                 ext = os.path.splitext(x)[1]
15501                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15502                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15503                         break
15504
15505         root_config = trees[settings["ROOT"]]["root_config"]
15506         if myaction == "list-sets":
15507                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15508                 sys.stdout.flush()
15509                 return os.EX_OK
15510
15511         # only expand sets for actions taking package arguments
15512         oldargs = myfiles[:]
15513         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15514                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15515                 if retval != os.EX_OK:
15516                         return retval
15517
15518                 # Handle empty sets specially, since emerge would otherwise respond
15519                 # with the help message for an empty argument list.
15520                 if oldargs and not myfiles:
15521                         print "emerge: no targets left after set expansion"
15522                         return 0
15523
15524         if ("--tree" in myopts) and ("--columns" in myopts):
15525                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15526                 return 1
15527
15528         if ("--quiet" in myopts):
15529                 spinner.update = spinner.update_quiet
15530                 portage.util.noiselimit = -1
15531
15532         # Always create packages if FEATURES=buildpkg
15533         # Imply --buildpkg if --buildpkgonly
15534         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15535                 if "--buildpkg" not in myopts:
15536                         myopts["--buildpkg"] = True
15537
15538         # Always try to fetch binary packages if FEATURES=getbinpkg
15539         if ("getbinpkg" in settings.features):
15540                 myopts["--getbinpkg"] = True
15541
15542         if "--buildpkgonly" in myopts:
15543                 # --buildpkgonly will not merge anything, so
15544                 # it cancels all binary package options.
15545                 for opt in ("--getbinpkg", "--getbinpkgonly",
15546                         "--usepkg", "--usepkgonly"):
15547                         myopts.pop(opt, None)
15548
15549         if "--fetch-all-uri" in myopts:
15550                 myopts["--fetchonly"] = True
15551
15552         if "--skipfirst" in myopts and "--resume" not in myopts:
15553                 myopts["--resume"] = True
15554
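              # Expand the binary-package implication chain: --getbinpkgonly implies
              # --usepkgonly and --getbinpkg, and --getbinpkg implies --usepkg.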
15555         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15556                 myopts["--usepkgonly"] = True
15557
15558         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15559                 myopts["--getbinpkg"] = True
15560
15561         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15562                 myopts["--usepkg"] = True
15563
15564         # Also allow -K to apply --usepkg/-k
15565         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15566                 myopts["--usepkg"] = True
15567
15568         # Allow -p to remove --ask
15569         if ("--pretend" in myopts) and ("--ask" in myopts):
15570                 print ">>> --pretend disables --ask... removing --ask from options."
15571                 del myopts["--ask"]
15572
15573         # forbid --ask when not in a terminal
15574         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15575         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15576                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15577                         noiselevel=-1)
15578                 return 1
15579
15580         if settings.get("PORTAGE_DEBUG", "") == "1":
15581                 spinner.update = spinner.update_quiet
15582                 portage.debug=1
15583                 if "python-trace" in settings.features:
15584                         import portage.debug
15585                         portage.debug.set_trace(True)
15586
15587         if not ("--quiet" in myopts):
15588                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15589                         spinner.update = spinner.update_basic
15590
15591         if myaction == 'version':
15592                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15593                         settings.profile_path, settings["CHOST"],
15594                         trees[settings["ROOT"]]["vartree"].dbapi)
15595                 return 0
15596         elif "--help" in myopts:
15597                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15598                 return 0
15599
15600         if "--debug" in myopts:
15601                 print "myaction", myaction
15602                 print "myopts", myopts
15603
15604         if not myaction and not myfiles and "--resume" not in myopts:
15605                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15606                 return 1
15607
15608         pretend = "--pretend" in myopts
15609         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15610         buildpkgonly = "--buildpkgonly" in myopts
15611
15612         # Check that the current user has the privileges needed for the requested action.
15613         if portage.secpass < 2:
15614                 # We've already allowed "--version" and "--help" above.
15615                 if "--pretend" not in myopts and myaction not in ("search","info"):
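                              # Superuser access is unnecessary when only fetching, when building
                              # binary packages with portage group access, for "metadata"/"regen",
                              # or for "sync" when PORTDIR is already writable by this user.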
15616                         need_superuser = not \
15617                                 (fetchonly or \
15618                                 (buildpkgonly and secpass >= 1) or \
15619                                 myaction in ("metadata", "regen") or \
15620                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15621                         if portage.secpass < 1 or \
15622                                 need_superuser:
15623                                 if need_superuser:
15624                                         access_desc = "superuser"
15625                                 else:
15626                                         access_desc = "portage group"
15627                                 # Always show portage_group_warning() when only portage group
15628                                 # access is required but the user is not in the portage group.
15629                                 from portage.data import portage_group_warning
15630                                 if "--ask" in myopts:
15631                                         myopts["--pretend"] = True
15632                                         del myopts["--ask"]
15633                                         print ("%s access is required... " + \
15634                                                 "adding --pretend to options.\n") % access_desc
15635                                         if portage.secpass < 1 and not need_superuser:
15636                                                 portage_group_warning()
15637                                 else:
15638                                         sys.stderr.write(("emerge: %s access is " + \
15639                                                 "required.\n\n") % access_desc)
15640                                         if portage.secpass < 1 and not need_superuser:
15641                                                 portage_group_warning()
15642                                         return 1
15643
15644         disable_emergelog = False
15645         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15646                 if x in myopts:
15647                         disable_emergelog = True
15648                         break
15649         if myaction in ("search", "info"):
15650                 disable_emergelog = True
15651         if disable_emergelog:
15652                 """ Disable emergelog for everything except build or unmerge
15653                 operations.  This helps minimize parallel emerge.log entries that can
15654                 confuse log parsers.  We especially want it disabled during
15655                 parallel-fetch, which uses --resume --fetchonly."""
15656                 global emergelog
15657                 def emergelog(*pargs, **kargs):
15658                         pass
15659
15660         if not "--pretend" in myopts:
15661                 emergelog(xterm_titles, "Started emerge on: "+\
15662                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15663                 myelogstr=""
15664                 if myopts:
15665                         myelogstr=" ".join(myopts)
15666                 if myaction:
15667                         myelogstr+=" "+myaction
15668                 if myfiles:
15669                         myelogstr += " " + " ".join(oldargs)
15670                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15671         del oldargs
15672
15673         def emergeexitsig(signum, frame):
15674                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15675                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15676                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
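                      # Encode the terminating signal in the exit status (100 + signum).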
15677                 sys.exit(100+signum)
15678         signal.signal(signal.SIGINT, emergeexitsig)
15679         signal.signal(signal.SIGTERM, emergeexitsig)
15680
15681         def emergeexit():
15682                 """This gets our final log message in before we quit."""
15683                 if "--pretend" not in myopts:
15684                         emergelog(xterm_titles, " *** terminating.")
15685                 if "notitles" not in settings.features:
15686                         xtermTitleReset()
15687         portage.atexit_register(emergeexit)
15688
15689         if myaction in ("config", "metadata", "regen", "sync"):
15690                 if "--pretend" in myopts:
15691                         sys.stderr.write(("emerge: The '%s' action does " + \
15692                                 "not support '--pretend'.\n") % myaction)
15693                         return 1
15694
15695         if "sync" == myaction:
15696                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15697         elif "metadata" == myaction:
15698                 action_metadata(settings, portdb, myopts)
15699         elif myaction=="regen":
15700                 validate_ebuild_environment(trees)
15701                 return action_regen(settings, portdb, myopts.get("--jobs"),
15702                         myopts.get("--load-average"))
15703         # CONFIG action
15704         elif "config"==myaction:
15705                 validate_ebuild_environment(trees)
15706                 action_config(settings, trees, myopts, myfiles)
15707
15708         # SEARCH action
15709         elif "search"==myaction:
15710                 validate_ebuild_environment(trees)
15711                 action_search(trees[settings["ROOT"]]["root_config"],
15712                         myopts, myfiles, spinner)
15713         elif myaction in ("clean", "unmerge") or \
15714                 (myaction == "prune" and "--nodeps" in myopts):
15715                 validate_ebuild_environment(trees)
15716
15717                 # Ensure atoms are valid before calling unmerge().
15718                 # For backward compat, leading '=' is not required.
15719                 for x in myfiles:
15720                         if is_valid_package_atom(x) or \
15721                                 is_valid_package_atom("=" + x):
15722                                 continue
15723                         msg = []
15724                         msg.append("'%s' is not a valid package atom." % (x,))
15725                         msg.append("Please check ebuild(5) for full details.")
15726                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15727                                 level=logging.ERROR, noiselevel=-1)
15728                         return 1
15729
15730                 # When given a list of atoms, unmerge
15731                 # them in the order given.
15732                 ordered = myaction == "unmerge"
15733                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15734                         mtimedb["ldpath"], ordered=ordered):
15735                         if not (buildpkgonly or fetchonly or pretend):
15736                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15737
15738         elif myaction in ("depclean", "info", "prune"):
15739
15740                 # Ensure atoms are valid before calling unmerge().
15741                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15742                 valid_atoms = []
15743                 for x in myfiles:
15744                         if is_valid_package_atom(x):
15745                                 try:
15746                                         valid_atoms.append(
15747                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15748                                 except portage.exception.AmbiguousPackageName, e:
15749                                         msg = "The short ebuild name \"" + x + \
15750                                                 "\" is ambiguous.  Please specify " + \
15751                                                 "one of the following " + \
15752                                                 "fully-qualified ebuild names instead:"
15753                                         for line in textwrap.wrap(msg, 70):
15754                                                 writemsg_level("!!! %s\n" % (line,),
15755                                                         level=logging.ERROR, noiselevel=-1)
15756                                         for i in e[0]:
15757                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15758                                                         level=logging.ERROR, noiselevel=-1)
15759                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15760                                         return 1
15761                                 continue
15762                         msg = []
15763                         msg.append("'%s' is not a valid package atom." % (x,))
15764                         msg.append("Please check ebuild(5) for full details.")
15765                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15766                                 level=logging.ERROR, noiselevel=-1)
15767                         return 1
15768
15769                 if myaction == "info":
15770                         return action_info(settings, trees, myopts, valid_atoms)
15771
15772                 validate_ebuild_environment(trees)
15773                 action_depclean(settings, trees, mtimedb["ldpath"],
15774                         myopts, myaction, valid_atoms, spinner)
15775                 if not (buildpkgonly or fetchonly or pretend):
15776                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15777         # "update", "system", or just process files:
15778         else:
15779                 validate_ebuild_environment(trees)
15780
15781                 for x in myfiles:
15782                         if x.startswith(SETPREFIX) or \
15783                                 is_valid_package_atom(x):
15784                                 continue
15785                         if x[:1] == os.sep:
15786                                 continue
15787                         try:
15788                                 os.lstat(x)
15789                                 continue
15790                         except OSError:
15791                                 pass
15792                         msg = []
15793                         msg.append("'%s' is not a valid package atom." % (x,))
15794                         msg.append("Please check ebuild(5) for full details.")
15795                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15796                                 level=logging.ERROR, noiselevel=-1)
15797                         return 1
15798
15799                 if "--pretend" not in myopts:
15800                         display_news_notification(root_config, myopts)
15801                 retval = action_build(settings, trees, mtimedb,
15802                         myopts, myaction, myfiles, spinner)
15803                 root_config = trees[settings["ROOT"]]["root_config"]
15804                 post_emerge(root_config, myopts, mtimedb, retval)
15805
15806                 return retval